MALI: utgard: upgrade DDK to r7p0-00rel0
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / arm / mali400 / mali / linux / mali_memory_cow.c
1 /*\r
2  * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
3  * 
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  * 
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */\r
10 #include <linux/mm.h>\r
11 #include <linux/list.h>\r
12 #include <linux/mm_types.h>\r
13 #include <linux/fs.h>\r
14 #include <linux/dma-mapping.h>\r
15 #include <linux/highmem.h>\r
16 #include <asm/cacheflush.h>\r
17 #include <linux/sched.h>\r
18 #ifdef CONFIG_ARM\r
19 #include <asm/outercache.h>\r
20 #endif\r
21 #include <asm/dma-mapping.h>\r
22 \r
23 #include "mali_memory.h"\r
24 #include "mali_kernel_common.h"\r
25 #include "mali_uk_types.h"\r
26 #include "mali_osk.h"\r
27 #include "mali_kernel_linux.h"\r
28 #include "mali_memory_cow.h"\r
29 #include "mali_memory_block_alloc.h"\r
30 #include "mali_memory_swap_alloc.h"\r
31 \r
32 /**\r
33 * allocate pages for COW backend and flush cache\r
34 */\r
35 static struct page *mali_mem_cow_alloc_page(void)\r
36 \r
37 {\r
38         mali_mem_os_mem os_mem;\r
39         struct mali_page_node *node;\r
40         struct page *new_page;\r
41 \r
42         int ret = 0;\r
43         /* allocate pages from os mem */\r
44         ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);\r
45 \r
46         if (ret) {\r
47                 return NULL;\r
48         }\r
49 \r
50         MALI_DEBUG_ASSERT(1 == os_mem.count);\r
51 \r
52         node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);\r
53         new_page = node->page;\r
54         node->page = NULL;\r
55         list_del(&node->list);\r
56         kfree(node);\r
57 \r
58         return new_page;\r
59 }\r
60 \r
61 \r
62 static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,\r
63                 u32 target_offset,\r
64                 u32 target_size)\r
65 {\r
66         MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||\r
67                           MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);\r
68 \r
69         if (MALI_MEM_OS == target_bk->type) {\r
70                 MALI_DEBUG_ASSERT(&target_bk->os_mem);\r
71                 MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);\r
72                 return &target_bk->os_mem.pages;\r
73         } else if (MALI_MEM_COW == target_bk->type) {\r
74                 MALI_DEBUG_ASSERT(&target_bk->cow_mem);\r
75                 MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);\r
76                 return  &target_bk->cow_mem.pages;\r
77         } else if (MALI_MEM_BLOCK == target_bk->type) {\r
78                 MALI_DEBUG_ASSERT(&target_bk->block_mem);\r
79                 MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);\r
80                 return  &target_bk->block_mem.pfns;\r
81         } else if (MALI_MEM_SWAP == target_bk->type) {\r
82                 MALI_DEBUG_ASSERT(&target_bk->swap_mem);\r
83                 MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);\r
84                 return  &target_bk->swap_mem.pages;\r
85         }\r
86 \r
87         return NULL;\r
88 }\r
89 \r
/**
 * Do COW for OS memory - also supports COW of memory allocated from the
 * block ("bank") allocator.
 *
 * range_start/range_size may be zero, in which case the caller is expected
 * to invoke mali_memory_cow_modify_range() later.
 *
 * Pages inside [range_start, range_start + range_size) get freshly allocated
 * OS pages; pages outside the range are shared with the target allocation by
 * taking an extra reference on them.
 *
 * @target_bk      target allocation's backend (the allocation to COW from)
 * @target_offset  offset into the target allocation to COW (4K aligned;
 *                 supports COW of memory allocated from a memory bank)
 * @target_size    size of the target region to COW, in bytes
 * @backend        COW backend to populate (its cow_mem must be empty)
 * @range_start    offset of the modified range (4K aligned)
 * @range_size     size of the modified range, in bytes
 *
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT on failure, in
 *         which case any partially built COW page list is released again.
 */
_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;  /* index of the current page within the target backend */
	struct page *new_page;
	struct list_head *pages = NULL;

	/* Resolve the page list of the target backend (OS/COW/BLOCK/SWAP). */
	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);

	if (NULL == pages) {
		MALI_DEBUG_PRINT_ERROR(("No memory page  need to cow ! \n"));
		return _MALI_OSK_ERR_FAULT;
	}

	/* The COW backend must be empty; we build its page list from scratch. */
	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	/* Hold the target backend's lock so its page list cannot change while
	 * we take references on (or copy from) its pages. */
	mutex_lock(&target_bk->mutex);
	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add page from (target_offset,target_offset+size) to cow backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node, always use OS memory for COW */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			INIT_LIST_HEAD(&page_node->list);

			/* check if in the modified range; cow->count doubles as the
			 * index of the next page within the COW backend */
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new page */
				/* To simplify the case, all COW memory is allocated from os memory */
				new_page = mali_mem_cow_alloc_page();

				if (NULL == new_page) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_page(page_node, new_page);
			} else {
				/* Outside the modified range: share the target's page.
				 * Block-allocator pages are tracked via block items,
				 * not struct page. */
				if (m_page->type != MALI_PAGE_NODE_BLOCK) {
					_mali_page_node_add_page(page_node, m_page->page);
				} else {
					page_node->type = MALI_PAGE_NODE_BLOCK;
					_mali_page_node_add_block_item(page_node, m_page->blk_it);
				}

				/* add ref to this page */
				_mali_page_node_ref(m_page);
			}

			/* add it to COW backend page list */
			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);
	return _MALI_OSK_ERR_OK;
error:
	/* Tear down whatever part of the COW page list was already built. */
	mali_mem_cow_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;
}
182 \r
/**
 * Do COW for swappable memory — the swap-backed counterpart of
 * mali_memory_cow_os_memory().
 *
 * Pages inside [range_start, range_start + range_size) get fresh swap items
 * (with newly allocated swap indices); pages outside the range share the
 * target's swap items by taking an extra reference.
 *
 * @target_bk      target allocation's backend (the allocation to COW from)
 * @target_offset  offset into the target allocation to COW (4K aligned)
 * @target_size    size of the target region to COW, in bytes
 * @backend        COW backend to populate (its cow_mem must be empty);
 *                 gets MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN set
 * @range_start    offset of the modified range (4K aligned)
 * @range_size     size of the modified range, in bytes
 *
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT on failure, in
 *         which case any partially built swap page list is released again.
 */
_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;  /* index of the current page within the target backend */
	struct mali_swap_item *swap_item;
	struct list_head *pages = NULL;

	/* Resolve the page list of the target backend. */
	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
	if (NULL == pages) {
		MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! \n"));
		return _MALI_OSK_ERR_FAULT;
	}

	/* The COW backend must be empty; we build its page list from scratch. */
	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	mutex_lock(&target_bk->mutex);

	/* Mark the backend as not currently swapped in. */
	backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;

	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add page from (target_offset,target_offset+size) to cow backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node, use swap memory for COW memory swap cowed flag. */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			/* check if in the modified range; cow->count doubles as the
			 * index of the next page within the COW backend */
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new swap item for the private copy */
				swap_item = mali_mem_swap_alloc_swap_item();

				if (NULL == swap_item) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				/* Reserve a slot in the swap index bitmap. */
				swap_item->idx = mali_mem_swap_idx_alloc();

				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));
					kfree(page_node);
					kfree(swap_item);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_swap_item(page_node, swap_item);
			} else {
				/* Outside the modified range: share the target's swap item. */
				_mali_page_node_add_swap_item(page_node, m_page->swap_it);

				/* add ref to this page */
				_mali_page_node_ref(m_page);
			}

			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);

	return _MALI_OSK_ERR_OK;
error:
	/* Tear down whatever part of the swap page list was already built. */
	mali_mem_swap_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;

}
266 \r
267 \r
268 _mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)\r
269 {\r
270         if (node->type == MALI_PAGE_NODE_OS) {\r
271                 return mali_mem_os_put_page(node->page);\r
272         } else if (node->type == MALI_PAGE_NODE_BLOCK) {\r
273                 return mali_mem_block_unref_node(node);\r
274         } else if (node->type == MALI_PAGE_NODE_SWAP) {\r
275                 return _mali_mem_swap_put_page_node(node);\r
276         } else\r
277                 MALI_DEBUG_ASSERT(0);\r
278         return _MALI_OSK_ERR_FAULT;\r
279 }\r
280 \r
281 \r
282 /**\r
283 * Modify a range of a exist COW backend\r
284 * @backend -COW backend\r
285 * @range_start - offset of modified range (4K align)\r
286 * @range_size - size of modified range(in byte)\r
287 */\r
288 _mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,\r
289                 u32 range_start,\r
290                 u32 range_size)\r
291 {\r
292         mali_mem_allocation *alloc = NULL;\r
293         struct mali_session_data *session;\r
294         mali_mem_cow *cow = &backend->cow_mem;\r
295         struct mali_page_node *m_page, *m_tmp;\r
296         LIST_HEAD(pages);\r
297         struct page *new_page;\r
298         u32 count = 0;\r
299         s32 change_pages_nr = 0;\r
300         _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;\r
301 \r
302         if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
303         if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
304 \r
305         alloc = backend->mali_allocation;\r
306         MALI_DEBUG_ASSERT_POINTER(alloc);\r
307 \r
308         session = alloc->session;\r
309         MALI_DEBUG_ASSERT_POINTER(session);\r
310 \r
311         MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
312         MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);\r
313 \r
314         mutex_lock(&backend->mutex);\r
315 \r
316         /* free pages*/\r
317         list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {\r
318 \r
319                 /* check if in the modified range*/\r
320                 if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&\r
321                     (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {\r
322                         if (MALI_PAGE_NODE_SWAP != m_page->type) {\r
323                                 new_page = mali_mem_cow_alloc_page();\r
324 \r
325                                 if (NULL == new_page) {\r
326                                         goto error;\r
327                                 }\r
328                                 if (1 != _mali_page_node_get_ref_count(m_page))\r
329                                         change_pages_nr++;\r
330                                 /* unref old page*/\r
331                                 _mali_osk_mutex_wait(session->cow_lock);\r
332                                 if (_mali_mem_put_page_node(m_page)) {\r
333                                         __free_page(new_page);\r
334                                         _mali_osk_mutex_signal(session->cow_lock);\r
335                                         goto error;\r
336                                 }\r
337                                 _mali_osk_mutex_signal(session->cow_lock);\r
338                                 /* add new page*/\r
339                                 /* always use OS for COW*/\r
340                                 m_page->type = MALI_PAGE_NODE_OS;\r
341                                 _mali_page_node_add_page(m_page, new_page);\r
342                         } else {\r
343                                 struct mali_swap_item *swap_item;\r
344 \r
345                                 swap_item = mali_mem_swap_alloc_swap_item();\r
346 \r
347                                 if (NULL == swap_item) {\r
348                                         goto error;\r
349                                 }\r
350 \r
351                                 swap_item->idx = mali_mem_swap_idx_alloc();\r
352 \r
353                                 if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {\r
354                                         MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));\r
355                                         kfree(swap_item);\r
356                                         goto error;\r
357                                 }\r
358 \r
359                                 if (1 != _mali_page_node_get_ref_count(m_page)) {\r
360                                         change_pages_nr++;\r
361                                 }\r
362 \r
363                                 if (_mali_mem_put_page_node(m_page)) {\r
364                                         mali_mem_swap_free_swap_item(swap_item);\r
365                                         goto error;\r
366                                 }\r
367 \r
368                                 _mali_page_node_add_swap_item(m_page, swap_item);\r
369                         }\r
370                 }\r
371                 count++;\r
372         }\r
373         cow->change_pages_nr  = change_pages_nr;\r
374 \r
375         MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);\r
376 \r
377         /* ZAP cpu mapping(modified range), and do cpu mapping here if need */\r
378         if (NULL != alloc->cpu_mapping.vma) {\r
379                 MALI_DEBUG_ASSERT(0 != alloc->backend_handle);\r
380                 MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);\r
381                 MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);\r
382 \r
383                 if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {\r
384                         zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);\r
385 \r
386                         ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start  + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);\r
387 \r
388                         if (unlikely(ret != _MALI_OSK_ERR_OK)) {\r
389                                 MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed !\n"));\r
390                                 ret =  _MALI_OSK_ERR_FAULT;\r
391                         }\r
392                 } else {\r
393                         /* used to trigger page fault for swappable cowed memory. */\r
394                         alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;\r
395                         alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;\r
396 \r
397                         zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);\r
398                         /* delete this flag to let swappble is ummapped regard to stauct page not page frame. */\r
399                         alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;\r
400                         alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;\r
401                 }\r
402         }\r
403 \r
404 error:\r
405         mutex_unlock(&backend->mutex);\r
406         return ret;\r
407 \r
408 }\r
409 \r
410 \r
411 /**\r
412 * Allocate pages for COW backend\r
413 * @alloc  -allocation for COW allocation\r
414 * @target_bk - target allocation's backend(the allocation need to do COW)\r
415 * @target_offset - the offset in target allocation to do COW(for support COW  a memory allocated from memory_bank, 4K align)\r
416 * @target_size - size of target allocation to do COW (for support memory bank)(in byte)\r
417 * @backend -COW backend\r
418 * @range_start - offset of modified range (4K align)\r
419 * @range_size - size of modified range(in byte)\r
420 */\r
421 _mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,\r
422                                        u32 target_offset,\r
423                                        u32 target_size,\r
424                                        mali_mem_backend *backend,\r
425                                        u32 range_start,\r
426                                        u32 range_size)\r
427 {\r
428         struct mali_session_data *session = backend->mali_allocation->session;\r
429 \r
430         MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);\r
431 \r
432         /* size & offset must be a multiple of the system page size */\r
433         if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
434         if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
435         if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
436         if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
437 \r
438         /* check backend type */\r
439         MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
440 \r
441         switch (target_bk->type) {\r
442         case MALI_MEM_OS:\r
443         case MALI_MEM_BLOCK:\r
444                 return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
445                 break;\r
446         case MALI_MEM_COW:\r
447                 if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {\r
448                         return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
449                 } else {\r
450                         return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
451                 }\r
452                 break;\r
453         case MALI_MEM_SWAP:\r
454                 return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
455                 break;\r
456         case MALI_MEM_EXTERNAL:\r
457                 /*NOT support yet*/\r
458                 MALI_DEBUG_PRINT_ERROR(("External physical memory not supported ! \n"));\r
459                 return _MALI_OSK_ERR_UNSUPPORTED;\r
460                 break;\r
461         case MALI_MEM_DMA_BUF:\r
462                 /*NOT support yet*/\r
463                 MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported ! \n"));\r
464                 return _MALI_OSK_ERR_UNSUPPORTED;\r
465                 break;\r
466         case MALI_MEM_UMP:\r
467                 /*NOT support yet*/\r
468                 MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported ! \n"));\r
469                 return _MALI_OSK_ERR_UNSUPPORTED;\r
470                 break;\r
471         default:\r
472                 /*Not support yet*/\r
473                 MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported ! \n"));\r
474                 return _MALI_OSK_ERR_UNSUPPORTED;\r
475                 break;\r
476         }\r
477         return _MALI_OSK_ERR_OK;\r
478 }\r
479 \r
480 \r
481 /**\r
482 * Map COW backend memory to mali\r
483 * Support OS/BLOCK for mali_page_node\r
484 */\r
485 int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)\r
486 {\r
487         mali_mem_allocation *cow_alloc;\r
488         struct mali_page_node *m_page;\r
489         struct mali_session_data *session;\r
490         struct mali_page_directory *pagedir;\r
491         u32 virt, start;\r
492 \r
493         cow_alloc = mem_bkend->mali_allocation;\r
494         virt = cow_alloc->mali_vma_node.vm_node.start;\r
495         start = virt;\r
496 \r
497         MALI_DEBUG_ASSERT_POINTER(mem_bkend);\r
498         MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
499         MALI_DEBUG_ASSERT_POINTER(cow_alloc);\r
500 \r
501         session = cow_alloc->session;\r
502         pagedir = session->page_directory;\r
503         MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);\r
504         list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {\r
505                 if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {\r
506                         dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);\r
507 #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)\r
508                         MALI_DEBUG_ASSERT(0 == (phys >> 32));\r
509 #endif\r
510                         mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,\r
511                                                 MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);\r
512                 }\r
513                 virt += MALI_MMU_PAGE_SIZE;\r
514         }\r
515         return 0;\r
516 }\r
517 \r
518 /**\r
519 * Map COW backend to cpu\r
520 * support OS/BLOCK memory\r
521 */\r
522 int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)\r
523 {\r
524         mali_mem_cow *cow = &mem_bkend->cow_mem;\r
525         struct mali_page_node *m_page;\r
526         int ret;\r
527         unsigned long addr = vma->vm_start;\r
528         MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);\r
529 \r
530         list_for_each_entry(m_page, &cow->pages, list) {\r
531                 /* We should use vm_insert_page, but it does a dcache\r
532                  * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.\r
533                 ret = vm_insert_page(vma, addr, page);\r
534                 */\r
535                 ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));\r
536 \r
537                 if (unlikely(0 != ret)) {\r
538                         return ret;\r
539                 }\r
540                 addr += _MALI_OSK_MALI_PAGE_SIZE;\r
541         }\r
542 \r
543         return 0;\r
544 }\r
545 \r
546 /**\r
547 * Map some pages(COW backend) to CPU vma@vaddr\r
548 *@ mem_bkend - COW backend\r
549 *@ vma\r
550 *@ vaddr -start CPU vaddr mapped to\r
551 *@ num - max number of pages to map to CPU vaddr\r
552 */\r
553 _mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,\r
554                 struct vm_area_struct *vma,\r
555                 unsigned long vaddr,\r
556                 int num)\r
557 {\r
558         mali_mem_cow *cow = &mem_bkend->cow_mem;\r
559         struct mali_page_node *m_page;\r
560         int ret;\r
561         int offset;\r
562         int count ;\r
563         unsigned long vstart = vma->vm_start;\r
564         count = 0;\r
565         MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);\r
566         MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);\r
567         MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);\r
568         offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;\r
569 \r
570         list_for_each_entry(m_page, &cow->pages, list) {\r
571                 if ((count >= offset) && (count < offset + num)) {\r
572                         ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));\r
573 \r
574                         if (unlikely(0 != ret)) {\r
575                                 if (count == offset) {\r
576                                         return _MALI_OSK_ERR_FAULT;\r
577                                 } else {\r
578                                         /* ret is EBUSY when page isn't in modify range, but now it's OK*/\r
579                                         return _MALI_OSK_ERR_OK;\r
580                                 }\r
581                         }\r
582                         vaddr += _MALI_OSK_MALI_PAGE_SIZE;\r
583                 }\r
584                 count++;\r
585         }\r
586         return _MALI_OSK_ERR_OK;\r
587 }\r
588 \r
/**
 * Release COW backend memory.
 * Pages are freed directly (put_page / unref), not returned to a pool.
 *
 * @mem_bkend       COW backend to release
 * @is_mali_mapped  MALI_TRUE if the memory is mapped in the Mali MMU and
 *                  must be unmapped first (non-swap path only)
 *
 * @return number of pages actually freed; pages still shared with another
 *         allocation only drop a reference and are not counted.
 */
u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
{
	mali_mem_allocation *alloc;
	struct mali_session_data *session;
	u32 free_pages_nr = 0;
	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	alloc = mem_bkend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {
		/* Unmap the memory from the mali virtual address space. */
		if (MALI_TRUE == is_mali_mapped)
			mali_mem_os_mali_unmap(alloc);
		/* free cow backend list */
		/* The session cow_lock serializes page ref counting against
		 * concurrent COW operations within the same session. */
		_mali_osk_mutex_wait(session->cow_lock);
		free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
		_mali_osk_mutex_signal(session->cow_lock);

		/* A COW list may also hold block-allocator nodes; free those too. */
		free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);

		MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
	} else {
		/* Swap-backed COW memory is released via the swap allocator. */
		free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);
	}


	MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
			     free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));

	mem_bkend->cow_mem.count = 0;
	return free_pages_nr;
}
629 \r
630 \r
/* Copy the contents of src_node's page into dst_node's page.
 * The destination node must be an OS or swap node; the source may be an
 * OS, swap or block node. DMA mappings are dropped around the CPU copy and
 * re-established afterwards so device and CPU views stay coherent. */
void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
{
	void *dst, *src;
	struct page *dst_page;
	dma_addr_t dma_addr;

	MALI_DEBUG_ASSERT(src_node != NULL);
	MALI_DEBUG_ASSERT(dst_node != NULL);
	MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS
			  || dst_node->type == MALI_PAGE_NODE_SWAP);

	/* Resolve the destination struct page (OS node vs swap node). */
	if (dst_node->type == MALI_PAGE_NODE_OS) {
		dst_page = dst_node->page;
	} else {
		dst_page = dst_node->swap_it->page;
	}

	/* Return ownership of the destination page to the CPU before writing. */
	dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* map it , and copy the content*/
	dst = kmap_atomic(dst_page);

	if (src_node->type == MALI_PAGE_NODE_OS ||
	    src_node->type == MALI_PAGE_NODE_SWAP) {
		struct page *src_page;

		if (src_node->type == MALI_PAGE_NODE_OS) {
			src_page = src_node->page;
		} else {
			src_page = src_node->swap_it->page;
		}

		/* Clean and invalidate cache */
		/* On ARM, speculative reads may pull stale data into the L1
		 * cache through the kernel linear mapping page table.
		 * Unmapping with DMA_BIDIRECTIONAL invalidates the L1 cache
		 * so that the following read gets the latest data. */
		dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

		src = kmap_atomic(src_page);
		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
		kunmap_atomic(src);
		/* Hand the source page back to the device. */
		dma_addr = dma_map_page(&mali_platform_device->dev, src_page,
					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

		if (src_node->type == MALI_PAGE_NODE_SWAP) {
			/* Swap items cache their DMA address; refresh it. */
			src_node->swap_it->dma_addr = dma_addr;
		}
	} else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
		/*
		* use ioremap to map src for BLOCK memory
		*/
		src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
		iounmap(src);
	}
	kunmap_atomic(dst);
	/* Map the destination page for the device; the GPU only reads it. */
	dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,
				0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

	if (dst_node->type == MALI_PAGE_NODE_SWAP) {
		dst_node->swap_it->dma_addr = dma_addr;
	}
}
698 \r
699 \r
700 /*\r
701 * allocate page on demand when CPU access it,\r
702 * THis used in page fault handler\r
703 */\r
704 _mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)\r
705 {\r
706         struct page *new_page = NULL;\r
707         struct mali_page_node *new_node = NULL;\r
708         int i = 0;\r
709         struct mali_page_node *m_page, *found_node = NULL;\r
710         struct  mali_session_data *session = NULL;\r
711         mali_mem_cow *cow = &mem_bkend->cow_mem;\r
712         MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
713         MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);\r
714         MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));\r
715 \r
716         /* allocate new page here */\r
717         new_page = mali_mem_cow_alloc_page();\r
718         if (!new_page)\r
719                 return _MALI_OSK_ERR_NOMEM;\r
720 \r
721         new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);\r
722         if (!new_node) {\r
723                 __free_page(new_page);\r
724                 return _MALI_OSK_ERR_NOMEM;\r
725         }\r
726 \r
727         /* find the page in backend*/\r
728         list_for_each_entry(m_page, &cow->pages, list) {\r
729                 if (i == offset_page) {\r
730                         found_node = m_page;\r
731                         break;\r
732                 }\r
733                 i++;\r
734         }\r
735         MALI_DEBUG_ASSERT(found_node);\r
736         if (NULL == found_node) {\r
737                 __free_page(new_page);\r
738                 kfree(new_node);\r
739                 return _MALI_OSK_ERR_ITEM_NOT_FOUND;\r
740         }\r
741 \r
742         _mali_page_node_add_page(new_node, new_page);\r
743 \r
744         /* Copy the src page's content to new page */\r
745         _mali_mem_cow_copy_page(found_node, new_node);\r
746 \r
747         MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);\r
748         session = mem_bkend->mali_allocation->session;\r
749         MALI_DEBUG_ASSERT_POINTER(session);\r
750         if (1 != _mali_page_node_get_ref_count(found_node)) {\r
751                 atomic_add(1, &session->mali_mem_allocated_pages);\r
752                 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {\r
753                         session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;\r
754                 }\r
755                 mem_bkend->cow_mem.change_pages_nr++;\r
756         }\r
757 \r
758         _mali_osk_mutex_wait(session->cow_lock);\r
759         if (_mali_mem_put_page_node(found_node)) {\r
760                 __free_page(new_page);\r
761                 kfree(new_node);\r
762                 _mali_osk_mutex_signal(session->cow_lock);\r
763                 return _MALI_OSK_ERR_NOMEM;\r
764         }\r
765         _mali_osk_mutex_signal(session->cow_lock);\r
766 \r
767         list_replace(&found_node->list, &new_node->list);\r
768 \r
769         kfree(found_node);\r
770 \r
771         /* map to GPU side*/\r
772         _mali_osk_mutex_wait(session->memory_lock);\r
773         mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);\r
774         _mali_osk_mutex_signal(session->memory_lock);\r
775         return _MALI_OSK_ERR_OK;\r
776 }\r