ARM64: DTS: Add rk3399-firefly uart4 device node as /dev/ttyS1
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / arm / mali400 / mali / linux / mali_memory_block_alloc.c
index 03e81b886543a9212dd825ebb49c25b46f9114cb..deb2ee84f6e599874c05a04fdb514a1917750493 100755 (executable)
@@ -1,48 +1,71 @@
 /*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
+
 #include "mali_kernel_common.h"
 #include "mali_memory.h"
 #include "mali_memory_block_alloc.h"
 #include "mali_osk.h"
 #include <linux/mutex.h>
-#define MALI_BLOCK_SIZE (256UL * 1024UL)  /* 256 kB, remember to keep the ()s */
 
-struct block_info {
-       struct block_info *next;
-};
 
-typedef struct block_info block_info;
+static mali_block_allocator *mali_mem_block_gobal_allocator = NULL;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item)
+{
+       return (item->phy_addr & ~(MALI_BLOCK_REF_MASK));
+}
 
 
-typedef struct block_allocator {
-       struct mutex mutex;
-       block_info *all_blocks;
-       block_info *first_free;
-       u32 base;
-       u32 cpu_usage_adjust;
-       u32 num_blocks;
-       u32 free_blocks;
-} block_allocator;
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item)
+{
+       return (item->phy_addr / MALI_BLOCK_SIZE);
+}
+
+
+u32 mali_mem_block_get_ref_count(mali_page_node *node)
+{
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK);
+}
+
 
-static block_allocator *mali_mem_block_gobal_allocator = NULL;
+/* Increase the refence count
+* It not atomic, so it need to get sp_lock before call this function
+*/
 
-MALI_STATIC_INLINE u32 get_phys(block_allocator *info, block_info *block)
+u32 mali_mem_block_add_ref(mali_page_node *node)
 {
-       return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT);
+       return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK);
 }
 
-static mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size)
+/* Decase the refence count
+* It not atomic, so it need to get sp_lock before call this function
+*/
+u32 mali_mem_block_dec_ref(mali_page_node *node)
 {
-       block_allocator *info;
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0);
+       return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK);
+}
+
+
+static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size)
+{
+       mali_block_allocator *info;
        u32 usable_size;
        u32 num_blocks;
+       mali_page_node *m_node;
+       mali_block_item *mali_blk_items = NULL;
+       int i = 0;
 
        usable_size = size & ~(MALI_BLOCK_SIZE - 1);
        MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
@@ -55,246 +78,253 @@ static mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32
                return NULL;
        }
 
-       info = _mali_osk_malloc(sizeof(block_allocator));
+       info = _mali_osk_calloc(1, sizeof(mali_block_allocator));
        if (NULL != info) {
-               mutex_init(&info->mutex);
-               info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
-               if (NULL != info->all_blocks) {
-                       u32 i;
-                       info->first_free = NULL;
-                       info->num_blocks = num_blocks;
-                       info->free_blocks = num_blocks;
-
-                       info->base = base_address;
-                       info->cpu_usage_adjust = cpu_usage_adjust;
-
-                       for (i = 0; i < num_blocks; i++) {
-                               info->all_blocks[i].next = info->first_free;
-                               info->first_free = &info->all_blocks[i];
+               INIT_LIST_HEAD(&info->free);
+               spin_lock_init(&info->sp_lock);
+               info->total_num = num_blocks;
+               mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks);
+
+               if (mali_blk_items) {
+                       info->items = mali_blk_items;
+                       /* add blocks(4k size) to free list*/
+                       for (i = 0 ; i < num_blocks ; i++) {
+                               /* add block information*/
+                               mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE);
+                               /* add  to free list */
+                               m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+                               if (m_node == NULL)
+                                       goto fail;
+                               _mali_page_node_add_block_item(m_node, &(mali_blk_items[i]));
+                               list_add_tail(&m_node->list, &info->free);
+                               atomic_add(1, &info->free_num);
                        }
-
-                       return (mali_mem_allocator *)info;
+                       return info;
                }
-               _mali_osk_free(info);
        }
-
+fail:
+       mali_mem_block_allocator_destroy();
        return NULL;
 }
 
-void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator)
+void mali_mem_block_allocator_destroy(void)
 {
-       block_allocator *info = (block_allocator *)allocator;
+       struct mali_page_node *m_page, *m_tmp;
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       MALI_DEBUG_ASSERT_POINTER(info);
+       MALI_DEBUG_PRINT(4, ("Memory block destroy !\n"));
 
-       info = mali_mem_block_gobal_allocator;
-       if (NULL == info) return;
+       if (NULL == info)
+               return;
 
-       MALI_DEBUG_ASSERT_POINTER(info);
+       list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               list_del(&m_page->list);
+               kfree(m_page);
+       }
 
-       _mali_osk_free(info->all_blocks);
+       _mali_osk_free(info->items);
        _mali_osk_free(info);
 }
 
-static void mali_mem_block_mali_map(mali_mem_allocation *descriptor, u32 phys, u32 virt, u32 size)
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend)
 {
-       struct mali_page_directory *pagedir = descriptor->session->page_directory;
-       u32 prop = descriptor->mali_mapping.properties;
-       u32 offset = 0;
-
-       while (size) {
-               mali_mmu_pagedir_update(pagedir, virt + offset, phys + offset, MALI_MMU_PAGE_SIZE, prop);
-
-               size -= MALI_MMU_PAGE_SIZE;
-               offset += MALI_MMU_PAGE_SIZE;
-       }
+       mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+       u32 free_pages_nr = 0;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_block_mali_unmap(alloc);
+       mutex_lock(&mem_bkend->mutex);
+       free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem);
+       mutex_unlock(&mem_bkend->mutex);
+       return free_pages_nr;
 }
 
-static int mali_mem_block_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma, u32 mali_phys, u32 mapping_offset, u32 size, u32 cpu_usage_adjust)
-{
-       u32 virt = vma->vm_start + mapping_offset;
-       u32 cpu_phys = mali_phys + cpu_usage_adjust;
-       u32 offset = 0;
-       int ret;
 
-       while (size) {
-               ret = vm_insert_pfn(vma, virt + offset, __phys_to_pfn(cpu_phys + offset));
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       MALI_DEBUG_ASSERT_POINTER(info);
 
-               if (unlikely(ret)) {
-                       MALI_DEBUG_PRINT(1, ("Block allocator: Failed to insert pfn into vma\n"));
-                       return 1;
+       MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size));
+       /*do some init */
+       INIT_LIST_HEAD(&block_mem->pfns);
+
+       spin_lock(&info->sp_lock);
+       /*check if have enough space*/
+       if (atomic_read(&info->free_num) > page_count) {
+               list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+                       if (page_count > 0) {
+                               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+                               MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(m_page) == 0);
+                               list_move(&m_page->list, &block_mem->pfns);
+                               block_mem->count++;
+                               atomic_dec(&info->free_num);
+                               _mali_page_node_ref(m_page);
+                       } else {
+                               break;
+                       }
+                       page_count--;
                }
-
-               size -= MALI_MMU_PAGE_SIZE;
-               offset += MALI_MMU_PAGE_SIZE;
+       } else {
+               /* can't allocate from BLOCK memory*/
+               spin_unlock(&info->sp_lock);
+               return -1;
        }
 
+       spin_unlock(&info->sp_lock);
        return 0;
 }
 
-mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem)
 {
-       _mali_osk_errcode_t err;
-       mali_mem_allocation *descriptor;
-       block_allocator *info;
-       u32 left;
-       block_info *last_allocated = NULL;
-       block_allocator_allocation *ret_allocation;
-       u32 offset = 0;
-
-       size = ALIGN(size, MALI_BLOCK_SIZE);
-
-       info = mali_mem_block_gobal_allocator;
-       if (NULL == info) return NULL;
+       u32 free_pages_nr = 0;
 
-       left = size;
-       MALI_DEBUG_ASSERT(0 != left);
-
-       descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
-       if (NULL == descriptor) {
-               return NULL;
-       }
+       free_pages_nr = mali_mem_block_free_list(&block_mem->pfns);
+       MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE,
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+       block_mem->count = 0;
+       MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns));
 
-       descriptor->mali_mapping.addr = mali_addr;
-       descriptor->size = size;
-       descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
-       descriptor->cpu_mapping.ref = 1;
-
-       if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
-               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
-       } else {
-               /* Cached Mali memory mapping */
-               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
-               vma->vm_flags |= VM_SHARED;
-       }
-
-       ret_allocation = &descriptor->block_mem.mem;
-
-       ret_allocation->mapping_length = 0;
-
-       _mali_osk_mutex_wait(session->memory_lock);
-       mutex_lock(&info->mutex);
+       return free_pages_nr;
+}
 
-       if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
-               MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
-               mutex_unlock(&info->mutex);
-               _mali_osk_mutex_signal(session->memory_lock);
-               mali_mem_descriptor_destroy(descriptor);
-               return NULL;
-       }
 
-       err = mali_mem_mali_map_prepare(descriptor);
-       if (_MALI_OSK_ERR_OK != err) {
-               mutex_unlock(&info->mutex);
-               _mali_osk_mutex_signal(session->memory_lock);
-               mali_mem_descriptor_destroy(descriptor);
-               return NULL;
+u32 mali_mem_block_free_list(struct list_head *list)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       u32 free_pages_nr = 0;
+
+       if (info) {
+               spin_lock(&info->sp_lock);
+               list_for_each_entry_safe(m_page, m_tmp , list, list) {
+                       if (1 == _mali_page_node_get_ref_count(m_page)) {
+                               free_pages_nr++;
+                       }
+                       mali_mem_block_free_node(m_page);
+               }
+               spin_unlock(&info->sp_lock);
        }
+       return free_pages_nr;
+}
 
-       while ((left > 0) && (info->first_free)) {
-               block_info *block;
-               u32 phys_addr;
-               u32 current_mapping_size;
-
-               block = info->first_free;
-               info->first_free = info->first_free->next;
-               block->next = last_allocated;
-               last_allocated = block;
-
-               phys_addr = get_phys(info, block);
-
-               if (MALI_BLOCK_SIZE < left) {
-                       current_mapping_size = MALI_BLOCK_SIZE;
+/* free the node,*/
+void mali_mem_block_free_node(struct mali_page_node *node)
+{
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+
+       /* only handle BLOCK node */
+       if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+               /*Need to make this atomic?*/
+               if (1 == _mali_page_node_get_ref_count(node)) {
+                       /*Move to free list*/
+                       _mali_page_node_unref(node);
+                       list_move_tail(&node->list, &info->free);
+                       atomic_add(1, &info->free_num);
                } else {
-                       current_mapping_size = left;
+                       _mali_page_node_unref(node);
+                       list_del(&node->list);
+                       kfree(node);
                }
+       }
+}
 
-               mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
-               if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
-                       /* release all memory back to the pool */
-                       while (last_allocated) {
-                               /* This relinks every block we've just allocated back into the free-list */
-                               block = last_allocated->next;
-                               last_allocated->next = info->first_free;
-                               info->first_free = last_allocated;
-                               last_allocated = block;
-                       }
-
-                       mutex_unlock(&info->mutex);
-                       _mali_osk_mutex_signal(session->memory_lock);
-
-                       mali_mem_mali_map_free(descriptor);
-                       mali_mem_descriptor_destroy(descriptor);
+/* unref the node, but not free it */
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
+{
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       mali_page_node *new_node;
+
+       /* only handle BLOCK node */
+       if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+               /*Need to make this atomic?*/
+               if (1 == _mali_page_node_get_ref_count(node)) {
+                       /* allocate a  new node, Add to free list, keep the old node*/
+                       _mali_page_node_unref(node);
+                       new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+                       if (new_node) {
+                               memcpy(new_node, node, sizeof(mali_page_node));
+                               list_add(&new_node->list, &info->free);
+                               atomic_add(1, &info->free_num);
+                       } else
+                               return _MALI_OSK_ERR_FAULT;
 
-                       return NULL;
+               } else {
+                       _mali_page_node_unref(node);
                }
-
-               left -= current_mapping_size;
-               offset += current_mapping_size;
-               ret_allocation->mapping_length += current_mapping_size;
-
-               --info->free_blocks;
        }
+       return _MALI_OSK_ERR_OK;
+}
 
-       mutex_unlock(&info->mutex);
-       _mali_osk_mutex_signal(session->memory_lock);
-
-       MALI_DEBUG_ASSERT(0 == left);
 
-       /* Record all the information about this allocation */
-       ret_allocation->last_allocated = last_allocated;
-       ret_allocation->info = info;
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+       struct mali_page_directory *pagedir = session->page_directory;
+       struct mali_page_node *m_page;
+       dma_addr_t phys;
+       u32 virt = vaddr;
+       u32 prop = props;
+
+       list_for_each_entry(m_page, &block_mem->pfns, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+               /* Verify that the "physical" address is 32-bit and
+                * usable for Mali, when on a system with bus addresses
+                * wider than 32-bit. */
+               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+               virt += MALI_MMU_PAGE_SIZE;
+       }
 
-       return descriptor;
+       return 0;
 }
 
-void mali_mem_block_release(mali_mem_allocation *descriptor)
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc)
 {
-       block_allocator *info = descriptor->block_mem.mem.info;
-       block_info *block, *next;
-       block_allocator_allocation *allocation = &descriptor->block_mem.mem;
-
-       MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == descriptor->type);
-
-       block = allocation->last_allocated;
-
-       MALI_DEBUG_ASSERT_POINTER(block);
-
-       /* unmap */
-       mali_mem_mali_map_free(descriptor);
-
-       mutex_lock(&info->mutex);
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
 
-       while (block) {
-               MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
 
-               next = block->next;
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+       int ret;
+       mali_mem_block_mem *block_mem = &mem_bkend->block_mem;
+       unsigned long addr = vma->vm_start;
+       struct mali_page_node *m_page;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
 
-               /* relink into free-list */
-               block->next = info->first_free;
-               info->first_free = block;
+       list_for_each_entry(m_page, &block_mem->pfns, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
 
-               /* advance the loop */
-               block = next;
+               if (unlikely(0 != ret)) {
+                       return -EFAULT;
+               }
+               addr += _MALI_OSK_MALI_PAGE_SIZE;
 
-               ++info->free_blocks;
        }
 
-       mutex_unlock(&info->mutex);
+       return 0;
 }
 
-u32 mali_mem_block_allocator_stat(void)
-{
-       block_allocator *info = (block_allocator *)mali_mem_block_gobal_allocator;
-
-       if (NULL == info) return 0;
-
-       MALI_DEBUG_ASSERT_POINTER(info);
-
-       return (info->num_blocks - info->free_blocks) * MALI_BLOCK_SIZE;
-}
 
 _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
 {
-       mali_mem_allocator *allocator;
+       mali_block_allocator *allocator;
 
        /* Do the low level linux operation first */
 
@@ -305,7 +335,7 @@ _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 si
        }
 
        /* Create generic block allocator object to handle it */
-       allocator = mali_mem_block_allocator_create(start, 0 /* cpu_usage_adjust */, size);
+       allocator = mali_mem_block_allocator_create(start, size);
 
        if (NULL == allocator) {
                MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
@@ -313,7 +343,20 @@ _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 si
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }
 
-       mali_mem_block_gobal_allocator = (block_allocator *)allocator;
+       mali_mem_block_gobal_allocator = (mali_block_allocator *)allocator;
 
        return _MALI_OSK_ERR_OK;
 }
+
+mali_bool mali_memory_have_dedicated_memory(void)
+{
+       return mali_mem_block_gobal_allocator ? MALI_TRUE : MALI_FALSE;
+}
+
+u32 mali_mem_block_allocator_stat(void)
+{
+       mali_block_allocator *allocator = mali_mem_block_gobal_allocator;
+       MALI_DEBUG_ASSERT_POINTER(allocator);
+
+       return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE;
+}