drm/i915/gtt: Allocate va range only if vma is not bound
author Mika Kuoppala <mika.kuoppala@linux.intel.com>
Tue, 28 Apr 2015 14:56:17 +0000 (17:56 +0300)
committer Jani Nikula <jani.nikula@intel.com>
Thu, 30 Apr 2015 10:31:24 +0000 (13:31 +0300)
When we have bound a vma into an address space, the layout
of its page table structures is immutable. So we can be certain
that if a vma is already bound, there is no need to (re)allocate
a virtual address range for it.
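
In code terms, the invariant reduces to a guard on the first bind:
allocate page-table backing only when the vma has no binding yet.
A minimal sketch of the check this patch adds (fields and callbacks
as in the i915_vma_bind() hunk below):

    /* Page-table layout for a bound range is immutable, so the va
     * range only needs to be allocated on the very first bind. */
    if (vma->bound == 0 && vma->vm->allocate_va_range) {
            ret = vma->vm->allocate_va_range(vma->vm,
                                             vma->node.start,
                                             vma->node.size);
            if (ret)
                    return ret;
    }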

v2: - add sanity checks and remove superfluous GLOBAL_BIND set
    - we might do update for an unbound vma (Chris)

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90224
Testcase: igt/gem_exec_big #bdw
Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
drivers/gpu/drm/i915/i915_gem_gtt.c

index 6fae6bdde156f1675c3ad0953ce4edd9672c4104..9d3852c521c753023d8b1b54dddda9568dd28736 100644
@@ -1928,8 +1928,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
                vma->vm->insert_entries(vma->vm, pages,
                                        vma->node.start,
                                        cache_level, pte_flags);
-
-               vma->bound |= GLOBAL_BIND;
        }
 
        if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -2804,21 +2802,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
 {
-       int ret = 0;
-       u32 bind_flags = 0;
-
-       if (vma->vm->allocate_va_range) {
-               trace_i915_va_alloc(vma->vm, vma->node.start,
-                                   vma->node.size,
-                                   VM_TO_TRACE_NAME(vma->vm));
+       int ret;
+       u32 bind_flags;
 
-               ret = vma->vm->allocate_va_range(vma->vm,
-                                                vma->node.start,
-                                                vma->node.size);
-               if (ret)
-                       return ret;
-       }
+       if (WARN_ON(flags == 0))
+               return -EINVAL;
 
+       bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= GLOBAL_BIND;
        if (flags & PIN_USER)
@@ -2829,8 +2819,23 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        else
                bind_flags &= ~vma->bound;
 
-       if (bind_flags)
-               ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+       if (bind_flags == 0)
+               return 0;
+
+       if (vma->bound == 0 && vma->vm->allocate_va_range) {
+               trace_i915_va_alloc(vma->vm,
+                                   vma->node.start,
+                                   vma->node.size,
+                                   VM_TO_TRACE_NAME(vma->vm));
+
+               ret = vma->vm->allocate_va_range(vma->vm,
+                                                vma->node.start,
+                                                vma->node.size);
+               if (ret)
+                       return ret;
+       }
+
+       ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
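
For reference, a sketch of how i915_vma_bind() reads after this patch,
assembled from the hunks above. The PIN_UPDATE branch and the trailing
vma->bound update sit in surrounding context not shown in full here, so
treat those lines as assumptions from the pre-existing code rather than
part of this diff:

    int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                      u32 flags)
    {
            int ret;
            u32 bind_flags;

            /* v2 sanity check: a bind request must carry at least one flag */
            if (WARN_ON(flags == 0))
                    return -EINVAL;

            bind_flags = 0;
            if (flags & PIN_GLOBAL)
                    bind_flags |= GLOBAL_BIND;
            if (flags & PIN_USER)
                    bind_flags |= LOCAL_BIND;

            /* Assumed unchanged context: PIN_UPDATE rebinds what is already
             * bound, otherwise only the missing bindings are requested. */
            if (flags & PIN_UPDATE)
                    bind_flags |= vma->bound;
            else
                    bind_flags &= ~vma->bound;

            if (bind_flags == 0)
                    return 0;

            /* Allocate the va range only if the vma is not yet bound */
            if (vma->bound == 0 && vma->vm->allocate_va_range) {
                    trace_i915_va_alloc(vma->vm,
                                        vma->node.start,
                                        vma->node.size,
                                        VM_TO_TRACE_NAME(vma->vm));

                    ret = vma->vm->allocate_va_range(vma->vm,
                                                     vma->node.start,
                                                     vma->node.size);
                    if (ret)
                            return ret;
            }

            ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
            if (ret)
                    return ret;

            /* Tail lies past the hunk shown above; assumed from context */
            vma->bound |= bind_flags;

            return 0;
    }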