drm/i915: Infrastructure for supporting different GGTT views per object
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a0611bb576b3eb9b78ede149955f36b798360df..3927d931ad730db079a2bd28dd847d3e8fd38862 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,6 +121,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        goto err;
                }
 
+               WARN_ONCE(obj->base.dumb,
+                         "GPU use of dumb buffer is illegal.\n");
+
                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
@@ -357,11 +360,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
         * through the ppgtt for non_secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-           !target_i915_obj->has_global_gtt_mapping)) {
-               struct i915_vma *vma =
-                       list_first_entry(&target_i915_obj->vma_list,
-                                        typeof(*vma), vma_link);
-               vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
+           !(target_vma->bound & GLOBAL_BIND))) {
+               ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+                                   GLOBAL_BIND);
+               if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+                       return ret;
        }
 
        /* Validate that the target is in a valid r/w GPU domain */
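The hunk above replaces the open-coded bind of the first VMA on the object's vma_list with the new i915_vma_bind() helper (and the per-object has_global_gtt_mapping flag with the per-VMA bound bitfield), so a bind failure is now reported instead of silently assumed to succeed. A minimal sketch of the helper's likely shape, assuming it wraps the existing bind_vma hook and, for GGTT VMAs, first populates the view's backing pages; the real definition lives in i915_gem_gtt.c and the i915_get_ggtt_vma_pages() step is an assumption from the GGTT-views infrastructure this commit introduces:

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        /* Assumed: a GGTT view may carry its own page layout, so make
         * sure the view's pages exist before binding. */
        if (i915_is_ggtt(vma->vm)) {
                int ret = i915_get_ggtt_vma_pages(vma);

                if (ret)
                        return ret;
        }

        /* Delegate to the per-VM bind callback, as the old code did. */
        vma->bind_vma(vma, cache_level, flags);

        return 0;
}
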
@@ -531,7 +534,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
        flags = 0;
        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
-               flags |= PIN_MAPPABLE;
+               flags |= PIN_GLOBAL | PIN_MAPPABLE;
        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
                flags |= PIN_GLOBAL;
        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -946,7 +949,7 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_engine_cs *ring)
 {
-       u32 seqno = intel_ring_get_seqno(ring);
+       struct drm_i915_gem_request *req = intel_ring_get_request(ring);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
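This hunk begins the seqno-to-request conversion: move_to_active now tracks the ring's outstanding request structure rather than a bare 32-bit seqno. A sketch of the intel_ring_get_request() accessor, assuming it mirrors the old intel_ring_get_seqno() in intel_ringbuffer.h and simply hands back the ring's outstanding lazy request:

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
        /* A request must have been allocated before dispatch. */
        BUG_ON(ring->outstanding_lazy_request == NULL);
        return ring->outstanding_lazy_request;
}
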
@@ -963,7 +966,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
-                       obj->last_write_seqno = seqno;
+                       i915_gem_request_assign(&obj->last_write_req, req);
 
                        intel_fb_obj_invalidate(obj, ring);
 
@@ -971,7 +974,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
                }
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                       obj->last_fenced_seqno = seqno;
+                       i915_gem_request_assign(&obj->last_fenced_req, req);
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                                struct drm_i915_private *dev_priv = to_i915(ring->dev);
                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
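i915_gem_request_assign() is the reference-counted replacement for the plain seqno stores it displaces in these two hunks, keeping obj->last_write_req and obj->last_fenced_req pointing at a request that cannot be freed underneath them. A sketch, assuming the reference/unreference pair from the request conversion series:

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                        struct drm_i915_gem_request *src)
{
        /* Take the new reference before dropping the old one so that
         * assigning a pointer over itself is safe. */
        if (src)
                i915_gem_request_reference(src);

        if (*pdst)
                i915_gem_request_unreference(*pdst);

        *pdst = src;
}
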
@@ -993,7 +996,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
        ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)__i915_add_request(ring, file, obj, NULL);
+       (void)__i915_add_request(ring, file, obj);
 }
 
 static int
@@ -1023,6 +1026,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
        return 0;
 }
 
+static int
+i915_emit_box(struct intel_engine_cs *ring,
+             struct drm_clip_rect *box,
+             int DR1, int DR4)
+{
+       int ret;
+
+       if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+           box->y2 <= 0 || box->x2 <= 0) {
+               DRM_ERROR("Bad box %d,%d..%d,%d\n",
+                         box->x1, box->y1, box->x2, box->y2);
+               return -EINVAL;
+       }
+
+       if (INTEL_INFO(ring->dev)->gen >= 4) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
+               intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+               intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+               intel_ring_emit(ring, DR4);
+       } else {
+               ret = intel_ring_begin(ring, 6);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
+               intel_ring_emit(ring, DR1);
+               intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+               intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+               intel_ring_emit(ring, DR4);
+               intel_ring_emit(ring, 0);
+       }
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
+
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                               struct intel_engine_cs *ring,
@@ -1151,7 +1195,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
-                       ret = i915_emit_box(dev, &cliprects[i],
+                       ret = i915_emit_box(ring, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto error;
@@ -1170,7 +1214,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                        return ret;
        }
 
-       trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
 
        i915_gem_execbuffer_move_to_active(vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1300,12 +1344,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto pre_mutex_err;
 
-       if (dev_priv->ums.mm_suspended) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EBUSY;
-               goto pre_mutex_err;
-       }
-
        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
@@ -1368,17 +1406,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                      batch_obj,
                                      args->batch_start_offset,
                                      file->is_master);
-               if (ret)
-                       goto err;
-
-               /*
-                * XXX: Actually do this when enabling batch copy...
-                *
-                * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
-                * from MI_BATCH_BUFFER_START commands issued in the
-                * dispatch_execbuffer implementations. We specifically don't
-                * want that set when the command parser is enabled.
-                */
+               if (ret) {
+                       if (ret != -EACCES)
+                               goto err;
+               } else {
+                       /*
+                        * XXX: Actually do this when enabling batch copy...
+                        *
+                        * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+                        * from MI_BATCH_BUFFER_START commands issued in the
+                        * dispatch_execbuffer implementations. We specifically don't
+                        * want that set when the command parser is enabled.
+                        */
+               }
        }
 
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure