drm/i915: Inline check required for object syncing prior to execbuf
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 27 Apr 2015 12:41:18 +0000 (13:41 +0100)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 21 May 2015 13:11:43 +0000 (15:11 +0200)
Checking obj->active against the mask of the other rings up front lets us skip
the call to i915_gem_object_sync() entirely in the common case where the object
is idle or only active on the submitting ring, trimming a little overhead from
execbuf.

v2: execlists is special and likes to duplicate code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_lrc.c

index 560c79a8a43d5cac451ce2001ffac0578d89259c..bd0e4bda2c649a54bb97a1b791ecb6d52e4438f1 100644
@@ -889,6 +889,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
                                struct list_head *vmas)
 {
+       const unsigned other_rings = ~intel_ring_flag(ring);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -896,9 +897,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
-               ret = i915_gem_object_sync(obj, ring);
-               if (ret)
-                       return ret;
+
+               if (obj->active & other_rings) {
+                       ret = i915_gem_object_sync(obj, ring);
+                       if (ret)
+                               return ret;
+               }
 
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);
index a169cca48496efc2377b838c78a69518cc580304..96ae90affad5c29438144ea2a1893244c152fca4 100644
@@ -628,6 +628,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
                                 struct list_head *vmas)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
+       const unsigned other_rings = ~intel_ring_flag(ring);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -636,9 +637,11 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
 
-               ret = i915_gem_object_sync(obj, ring);
-               if (ret)
-                       return ret;
+               if (obj->active & other_rings) {
+                       ret = i915_gem_object_sync(obj, ring);
+                       if (ret)
+                               return ret;
+               }
 
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);
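
For reference, a minimal standalone sketch of the inlined check. This is not
the driver code: the ring flag values and the needs_inter_ring_sync() helper
below are hypothetical. The only assumption carried over from the patch is
that obj->active is a bitmask with one bit per ring the object is still
active on, and that intel_ring_flag(ring) yields the submitting ring's bit.

/*
 * Standalone illustration of the "sync only against other rings" check.
 * The RCS/BCS bit values are illustrative, not the driver's definitions.
 */
#include <stdio.h>

#define RCS_FLAG (1u << 0)	/* render ring (illustrative value) */
#define BCS_FLAG (1u << 1)	/* blitter ring (illustrative value) */

/* Sync is only required when the object is still active on another ring. */
static int needs_inter_ring_sync(unsigned int active, unsigned int ring_flag)
{
	const unsigned int other_rings = ~ring_flag;

	return (active & other_rings) != 0;
}

int main(void)
{
	/* Last touched by the blitter, now submitted on render: must sync. */
	printf("blitter->render: %d\n", needs_inter_ring_sync(BCS_FLAG, RCS_FLAG));
	/* Only active on the submitting ring itself: the sync call is skipped. */
	printf("render->render:  %d\n", needs_inter_ring_sync(RCS_FLAG, RCS_FLAG));
	/* Idle object: also skipped. */
	printf("idle->render:    %d\n", needs_inter_ring_sync(0, RCS_FLAG));

	return 0;
}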