drm/amdgpu: add lock for interval tree in vm
author Chunming Zhou <David1.Zhou@amd.com>
Fri, 13 Nov 2015 05:32:01 +0000 (13:32 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 18 Nov 2015 16:40:55 +0000 (11:40 -0500)
Change-Id: I62b892a22af37b32e6b4aefca80a25cf45426ed2
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index a5692624070abcd3cdda9ad07c9814bec31b5efe..306f75700bf8e1fa5b5c6a7731645ad4a4d86f60 100644
@@ -954,6 +954,8 @@ struct amdgpu_vm {
 
        /* for id and flush management per ring */
        struct amdgpu_vm_id     ids[AMDGPU_MAX_RINGS];
+       /* for interval tree */
+       spinlock_t              it_lock;
 };
 
 struct amdgpu_vm_manager {
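
The header change above pairs the new it_lock with the interval tree it protects, and amdgpu_vm_init() below initializes it with spin_lock_init(). As a minimal standalone sketch of that pairing (hypothetical names my_vm/my_vm_init, not driver code, using the rb_root-based interval tree API of kernels from this era; newer kernels take a struct rb_root_cached instead):

#include <linux/spinlock.h>
#include <linux/interval_tree.h>

struct my_vm {
	struct rb_root	va;		/* interval tree of VA mappings */
	spinlock_t	it_lock;	/* protects modifications of va */
};

static void my_vm_init(struct my_vm *vm)
{
	vm->va = RB_ROOT;
	spin_lock_init(&vm->it_lock);
}
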
index 0bdbb2480f9bf9d9dbcb6b45288f91680fc38117..0513f3fed2c25625fb61833e20da44f6f73330b9 100644
@@ -1028,7 +1028,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+       spin_lock(&vm->it_lock);
        it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+       spin_unlock(&vm->it_lock);
        if (it) {
                struct amdgpu_bo_va_mapping *tmp;
                tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1055,7 +1057,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        mapping->flags = flags;
 
        list_add(&mapping->list, &bo_va->invalids);
+       spin_lock(&vm->it_lock);
        interval_tree_insert(&mapping->it, &vm->va);
+       spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_map(bo_va, mapping);
 
        /* Make sure the page tables are allocated */
@@ -1101,7 +1105,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 error_free:
        list_del(&mapping->list);
+       spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
+       spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
        kfree(mapping);
 
@@ -1151,7 +1157,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
        }
 
        list_del(&mapping->list);
+       spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
+       spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
        if (valid)
@@ -1187,13 +1195,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                list_del(&mapping->list);
+               spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
+               spin_unlock(&vm->it_lock);
                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                list_add(&mapping->list, &vm->freed);
        }
        list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
                list_del(&mapping->list);
+               spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
+               spin_unlock(&vm->it_lock);
                kfree(mapping);
        }
 
@@ -1248,7 +1260,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->cleared);
        INIT_LIST_HEAD(&vm->freed);
-
+       spin_lock_init(&vm->it_lock);
        pd_size = amdgpu_vm_directory_size(adev);
        pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1312,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
-
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                unsigned id = vm->ids[i].id;
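
A sketch of the locking pattern applied in amdgpu_vm_bo_map()/unmap()/rmv() above (hypothetical helper names and the my_vm struct from the earlier sketch, not driver code): every lookup, insert and remove on the tree is wrapped in the new spinlock. A spinlock is sufficient here because the critical sections only walk or update the rbtree and never sleep or allocate.

static struct interval_tree_node *
my_vm_find_overlap(struct my_vm *vm, unsigned long saddr, unsigned long eaddr)
{
	struct interval_tree_node *it;

	/* [saddr, eaddr) in GPU pages; interval_tree takes an inclusive last */
	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
	spin_unlock(&vm->it_lock);

	return it;
}

static void my_vm_map(struct my_vm *vm, struct interval_tree_node *node)
{
	spin_lock(&vm->it_lock);
	interval_tree_insert(node, &vm->va);
	spin_unlock(&vm->it_lock);
}

static void my_vm_unmap(struct my_vm *vm, struct interval_tree_node *node)
{
	spin_lock(&vm->it_lock);
	interval_tree_remove(node, &vm->va);
	spin_unlock(&vm->it_lock);
}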