drm/amdgpu: move VM manager cleanup into the VM code again
author     Christian König <christian.koenig@amd.com>
           Sun, 15 Nov 2015 19:52:06 +0000 (20:52 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 18 Nov 2015 16:40:27 +0000 (11:40 -0500)
It's not a good idea to duplicate that teardown code in every GMC version.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

index d1e16726e22561370eff3f2ea8057b31ecbc428d..beb74854a8a36d9fdfc5e82c9391526e8d9e5701 100644 (file)
@@ -972,6 +972,7 @@ struct amdgpu_vm_manager {
        struct amdgpu_ring                      *vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
index a9fcc995d27efa0936286a44643b0b96535427bd..6bb209bc0d367ad8abee74693d7bd4c23e18a812 100644 (file)
@@ -1318,3 +1318,18 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        mutex_destroy(&vm->mutex);
 }
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       for (i = 0; i < AMDGPU_NUM_VM; ++i)
+               fence_put(adev->vm_manager.active[i]);
+}
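
For context only (not part of the patch): a minimal, standalone C sketch of the de-duplication this commit performs. The per-IP fence_put() loops are hoisted into a single amdgpu_vm_manager_fini() helper that each GMC teardown path calls. The struct definitions, the fence_put() body, and the AMDGPU_NUM_VM value below are simplified stand-ins, not the real kernel definitions from amdgpu.h.

```c
/* Standalone sketch of the pattern: simplified stand-in types, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define AMDGPU_NUM_VM 16                 /* placeholder value for the sketch */

struct fence { int refcount; };

struct amdgpu_vm_manager {
	struct fence *active[AMDGPU_NUM_VM];
	int enabled;
};

struct amdgpu_device {
	struct amdgpu_vm_manager vm_manager;
};

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

/* One shared teardown helper instead of a copy of this loop per GMC version. */
static void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.active[i]);
}

/* Stand-in for gmc_v7_0_sw_fini()/gmc_v8_0_sw_fini() after the patch. */
static void gmc_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		adev->vm_manager.enabled = 0;
	}
}

int main(void)
{
	struct amdgpu_device adev = { .vm_manager = { .enabled = 1 } };

	gmc_sw_fini(&adev);
	printf("VM manager torn down, enabled=%d\n", adev.vm_manager.enabled);
	return 0;
}
```

The hunks below show the actual callers in gmc_v7_0.c and gmc_v8_0.c being switched over to the new helper.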
index 98ee39001c450ce4dd37eb7a39d6feb9994d0328..7427d8cd4c43fae6068c8a3455ac98f3627ff044 100644 (file)
@@ -961,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
 
 static int gmc_v7_0_sw_fini(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
@@ -1011,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
 
 static int gmc_v7_0_suspend(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
index c9209b45d90123a3efc78186208c2ab5ed712975..cb0e50ebb5285cef90b4a2879cd3cd800129c296 100644 (file)
@@ -980,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
 
 static int gmc_v8_0_sw_fini(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v8_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
@@ -1032,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
 
 static int gmc_v8_0_suspend(void *handle)
 {
-       int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->vm_manager.enabled) {
-               for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       fence_put(adev->vm_manager.active[i]);
+               amdgpu_vm_manager_fini(adev);
                gmc_v8_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }