drm/amdgpu: modify amdgpu_fence_wait_any() to amdgpu_fence_wait_multiple()
Author:     Junwei Zhang <Jerry.Zhang@amd.com>
AuthorDate: Wed, 19 Aug 2015 08:24:19 +0000 (16:24 +0800)
Committer:  Alex Deucher <alexander.deucher@amd.com>
CommitDate: Tue, 25 Aug 2015 14:38:28 +0000 (10:38 -0400)
Rename the function and update the related code with this modified function.
Add the new parameter of bool wait_all.

If wait_all is true, it will return when all fences are signaled or timeout.
If wait_all is false, it will return when any fence is signaled or timeout.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

index d050f503d9ffb442b723702635c242fd6af15a32..4addac5f676315fce5f3edfdf971d7f06ae39089 100644 (file)
@@ -440,9 +440,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                         struct amdgpu_fence **fences,
-                         bool intr, long t);
+signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
+                                      struct amdgpu_fence **array,
+                                      uint32_t count,
+                                      bool wait_all,
+                                      bool intr,
+                                      signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
index 98500f1756f7317ffa1627f79f9cb9018ffe7147..ae014fcf524e3d2ccb8c95b4e69dea802a41278d 100644 (file)
@@ -836,13 +836,12 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
-static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
+static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
 {
        int idx;
        struct amdgpu_fence *fence;
 
-       idx = 0;
-       for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+       for (idx = 0; idx < count; ++idx) {
                fence = fences[idx];
                if (fence) {
                        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
@@ -852,6 +851,22 @@ static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
        return false;
 }
 
+static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
+{
+       int idx;
+       struct amdgpu_fence *fence;
+
+       for (idx = 0; idx < count; ++idx) {
+               fence = fences[idx];
+               if (fence) {
+                       if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
 struct amdgpu_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
@@ -867,33 +882,56 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
                                             signed long t)
 {
-       struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_device *adev = fence->ring->adev;
 
-       memset(&array[0], 0, sizeof(array));
-       array[0] = fence;
-
-       return amdgpu_fence_wait_any(adev, array, intr, t);
+       return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
 }
 
-/* wait until any fence in array signaled */
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                               struct amdgpu_fence **array, bool intr, signed long t)
+/**
+ * Wait the fence array with timeout
+ *
+ * @adev:     amdgpu device
+ * @array:    the fence array with amdgpu fence pointer
+ * @count:    the number of the fence array
+ * @wait_all: flag to wait for all (true) or any (false) of the fences
+ * @intr:     when sleeping, whether the current task is interruptible
+ * @t:        timeout to wait
+ *
+ * If wait_all is true, it will return when all fences are signaled or timeout.
+ * If wait_all is false, it will return when any fence is signaled or timeout.
+ */
+signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
+                                      struct amdgpu_fence **array,
+                                      uint32_t count,
+                                      bool wait_all,
+                                      bool intr,
+                                      signed long t)
 {
        long idx = 0;
-       struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
+       struct amdgpu_wait_cb *cb;
        struct amdgpu_fence *fence;
 
        BUG_ON(!array);
 
-       for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+       cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+       if (cb == NULL) {
+               t = -ENOMEM;
+               goto err_free_cb;
+       }
+
+       for (idx = 0; idx < count; ++idx) {
                fence = array[idx];
                if (fence) {
                        cb[idx].task = current;
                        if (fence_add_callback(&fence->base,
-                                       &cb[idx].base, amdgpu_fence_wait_cb))
-                               return t; /* return if fence is already signaled */
+                                       &cb[idx].base, amdgpu_fence_wait_cb)) {
+                               /* The fence is already signaled */
+                               if (wait_all)
+                                       continue;
+                               else
+                                       goto fence_rm_cb;
+                       }
                }
        }
 
@@ -907,7 +945,9 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
                 * amdgpu_test_signaled_any must be called after
                 * set_current_state to prevent a race with wake_up_process
                 */
-               if (amdgpu_test_signaled_any(array))
+               if (!wait_all && amdgpu_test_signaled_any(array, count))
+                       break;
+               if (wait_all && amdgpu_test_signaled_all(array, count))
                        break;
 
                if (adev->needs_reset) {
@@ -923,13 +963,16 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
 
        __set_current_state(TASK_RUNNING);
 
-       idx = 0;
-       for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+fence_rm_cb:
+       for (idx = 0; idx < count; ++idx) {
                fence = array[idx];
                if (fence)
                        fence_remove_callback(&fence->base, &cb[idx].base);
        }
 
+err_free_cb:
+       kfree(cb);
+
        return t;
 }
 
index d6398cf45f2400d1c24ed54ac0f5168fdbdc1662..4597899e9758d58d01e6dd6010d5dfac0e5d2d28 100644 (file)
@@ -352,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
                spin_unlock(&sa_manager->wq.lock);
-               t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
+               t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS, false, false,
+                                               MAX_SCHEDULE_TIMEOUT);
                r = (t > 0) ? 0 : t;
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for block */