drm/i915: Tidy batch pool logic
Author: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 7 Apr 2015 15:20:35 +0000 (16:20 +0100)
Committer: Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 10 Apr 2015 06:56:04 +0000 (08:56 +0200)
Move the madvise logic out of the execbuffer main path into the
relatively rare allocation path, making the execbuffer manipulation less
fragile.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c

index 61ae8ff4eaed990d0f21b57b6d0490ca8b3c83d1..9605ff8f2fcdf4f2d3cefc806e96493311efc079 100644 (file)
@@ -869,6 +869,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
            batch_len + batch_start_offset > src_obj->base.size)
                return ERR_PTR(-E2BIG);
 
+       if (WARN_ON(dest_obj->pages_pin_count == 0))
+               return ERR_PTR(-ENODEV);
+
        ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
@@ -882,13 +885,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
                goto unpin_src;
        }
 
-       ret = i915_gem_object_get_pages(dest_obj);
-       if (ret) {
-               DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
-               goto unmap_src;
-       }
-       i915_gem_object_pin_pages(dest_obj);
-
        ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
@@ -898,7 +894,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
        dst = vmap_batch(dest_obj, 0, batch_len);
        if (!dst) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-               i915_gem_object_unpin_pages(dest_obj);
                ret = -ENOMEM;
                goto unmap_src;
        }
@@ -1129,7 +1124,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
        }
 
        vunmap(batch_base);
-       i915_gem_object_unpin_pages(shadow_batch_obj);
 
        return ret;
 }
index 564be7c5ea7ea9a28b24add362e8d5a90d0da4fd..21f3356cc0aba7f151fbdb82131573d5bba2b7c3 100644 (file)
@@ -67,25 +67,23 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
                                         struct drm_i915_gem_object,
                                         batch_pool_list);
 
-               WARN_ON(obj->active);
-
-               list_del_init(&obj->batch_pool_list);
+               list_del(&obj->batch_pool_list);
                drm_gem_object_unreference(&obj->base);
        }
 }
 
 /**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
  * @pool: the batch buffer pool
  * @size: the minimum desired size of the returned buffer
  *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
  *
  * Note: Callers must hold the struct_mutex
  *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
  */
 struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -97,8 +95,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
        WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
        list_for_each_entry_safe(tmp, next,
-                       &pool->cache_list, batch_pool_list) {
-
+                                &pool->cache_list, batch_pool_list) {
                if (tmp->active)
                        continue;
 
@@ -114,25 +111,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
                 * but not 'too much' bigger. A better way to do this
                 * might be to bucket the pool objects based on size.
                 */
-               if (tmp->base.size >= size &&
-                   tmp->base.size <= (2 * size)) {
+               if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
                        obj = tmp;
                        break;
                }
        }
 
-       if (!obj) {
+       if (obj == NULL) {
+               int ret;
+
                obj = i915_gem_alloc_object(pool->dev, size);
-               if (!obj)
+               if (obj == NULL)
                        return ERR_PTR(-ENOMEM);
 
-               list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-       }
-       else
-               /* Keep list in LRU order */
-               list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+               ret = i915_gem_object_get_pages(obj);
+               if (ret)
+                       return ERR_PTR(ret);
 
-       obj->madv = I915_MADV_WILLNEED;
+               obj->madv = I915_MADV_DONTNEED;
+       }
 
+       list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+       i915_gem_object_pin_pages(obj);
        return obj;
 }
index 2522f325e0e6d0d169dc71bce699df604baf8bb3..166d53330ee467dd00b35f35756c1eb7112a402a 100644 (file)
@@ -37,7 +37,6 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define  __EXEC_OBJECT_PURGEABLE (1<<27)
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
@@ -224,12 +223,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                vma->pin_count--;
 
-       if (entry->flags & __EXEC_OBJECT_PURGEABLE)
-               obj->madv = I915_MADV_DONTNEED;
-
-       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
-                         __EXEC_OBJECT_HAS_PIN |
-                         __EXEC_OBJECT_PURGEABLE);
+       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static void eb_destroy(struct eb_vmas *eb)
@@ -1165,11 +1159,13 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
        if (ret)
                goto err;
 
+       i915_gem_object_unpin_pages(shadow_batch_obj);
+
        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
        vma->exec_entry = shadow_exec_entry;
-       vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+       vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
        drm_gem_object_reference(&shadow_batch_obj->base);
        list_add_tail(&vma->exec_list, &eb->vmas);
 
@@ -1178,6 +1174,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
        return shadow_batch_obj;
 
 err:
+       i915_gem_object_unpin_pages(shadow_batch_obj);
        if (ret == -EACCES) /* unhandled chained batch */
                return batch_obj;
        else