Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / intel_ringbuffer.c
index 62892a826edec6df73de3d2b49edfc893d6e01a5..b59b6d5b75833e37e204da899c4dda2f65a9bf2d 100644 (file)
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 }
 
 static int
-render_ring_flush(struct intel_ring_buffer *ring,
-                 u32   invalidate_domains,
-                 u32   flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+                      u32      invalidate_domains,
+                      u32      flush_domains)
+{
+       u32 cmd;
+       int ret;
+
+       cmd = MI_FLUSH;
+       if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+               cmd |= MI_NO_WRITE_FLUSH;
+
+       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+               cmd |= MI_READ_FLUSH;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
+       return 0;
+}
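gen2_render_ring_flush pads its single flush dword with an MI_NOOP so the
tail advances by an even number of dwords (ring TAIL is qword-aligned),
hence the intel_ring_begin(ring, 2). A minimal, hypothetical call site
(not part of this patch), using the standard GEM domain flags:

	/* Invalidate the sampler caches before the GPU samples from a
	 * buffer the CPU just wrote; no render cache writeback needed. */
	ret = ring->flush(ring, I915_GEM_DOMAIN_SAMPLER, 0);
	if (ret)
		return ret;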
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+                      u32      invalidate_domains,
+                      u32      flush_domains)
 {
        struct drm_device *dev = ring->dev;
        u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
         */
 
        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-       if ((invalidate_domains|flush_domains) &
-           I915_GEM_DOMAIN_RENDER)
+       if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
-       if (INTEL_INFO(dev)->gen < 4) {
-               /*
-                * On the 965, the sampler cache always gets flushed
-                * and this bit is reserved.
-                */
-               if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-                       cmd |= MI_READ_FLUSH;
-       }
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;
 
@@ -290,9 +307,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-           I915_READ_START(ring) != obj->gtt_offset ||
-           (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+       if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+                    I915_READ_START(ring) == obj->gtt_offset &&
+                    (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
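The ring-dead check now polls instead of sampling once. wait_for(COND, MS)
is i915's polling helper from intel_drv.h: it returns 0 as soon as COND
holds and -ETIMEDOUT once MS milliseconds elapse, so the DRM_ERROR path
fires only if the ring fails to come up within 50ms. A simplified sketch
of its shape (not the exact macro body):

	#define wait_for(COND, MS) ({					\
		unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
		int ret__ = 0;						\
		while (!(COND)) {					\
			if (time_after(jiffies, timeout__)) {		\
				ret__ = -ETIMEDOUT;			\
				break;					\
			}						\
			msleep(1);					\
		}							\
		ret__;							\
	})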
@@ -384,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
        int ret = init_ring_common(ring);
 
        if (INTEL_INFO(dev)->gen > 3) {
-               int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-               I915_WRITE(MI_MODE, mode);
+               I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
                if (IS_GEN7(dev))
                        I915_WRITE(GFX_MODE_GEN7,
-                                  GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-                                  GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+                                  _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+                                  _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
        }
 
        if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +414,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                        return ret;
        }
 
-
        if (IS_GEN6(dev)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +421,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                 *  policy is not supported."
                 */
                I915_WRITE(CACHE_MODE_0,
-                          CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+                          _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (INTEL_INFO(dev)->gen >= 6) {
-               I915_WRITE(INSTPM,
-                          INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
-       }
+       if (INTEL_INFO(dev)->gen >= 6)
+               I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
        return ret;
 }
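The MI_MODE, GFX_MODE, CACHE_MODE_0 and INSTPM writes above all target
masked registers, where the high 16 bits select which of the low 16 bits
the write actually changes. The new helpers encapsulate the open-coded
"bit << 16 | bit" pattern; they are defined along these lines in
i915_reg.h:

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit */

so _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING) expands to exactly the old
INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING, just harder to get
wrong.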
@@ -483,21 +496,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
  * @seqno - seqno which the waiter will block on
  */
 static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
-               struct intel_ring_buffer *signaller,
-               int ring,
-               u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+              struct intel_ring_buffer *signaller,
+              u32 seqno)
 {
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
 
+       /* Throughout the GEM code, a request counts as complete once the
+        * ring's current seqno is >= the requested seqno. The hardware
+        * semaphore comparison, however, is strictly greater-than, so
+        * wait on seqno - 1 instead.
+        */
+       seqno -= 1;
+
+       WARN_ON(signaller->semaphore_register[waiter->id] ==
+               MI_SEMAPHORE_SYNC_INVALID);
+
        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+       intel_ring_emit(waiter,
+                       dw1 | signaller->semaphore_register[waiter->id]);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter, 0);
        intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +528,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
        return 0;
 }
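A hypothetical call site for the consolidated vfunc (the caller shown
here is illustrative, not part of this patch):

	/* Make 'waiter' idle until 'signaller' has executed 'seqno'. */
	ret = waiter->sync_to(waiter, signaller, seqno);
	if (ret)
		return ret;

Indexing signaller->semaphore_register[waiter->id] inside the shared
function is what lets the three per-ring wrappers below be deleted.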
 
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
-                   struct intel_ring_buffer *signaller,
-                   u32 seqno)
-{
-       WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
-       return intel_ring_sync(waiter,
-                              signaller,
-                              RCS,
-                              seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
-                     struct intel_ring_buffer *signaller,
-                     u32 seqno)
-{
-       WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
-       return intel_ring_sync(waiter,
-                              signaller,
-                              VCS,
-                              seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
-                     struct intel_ring_buffer *signaller,
-                     u32 seqno)
-{
-       WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
-       return intel_ring_sync(waiter,
-                              signaller,
-                              BCS,
-                              seqno);
-}
-
-
-
 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
 do {                                                                   \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
@@ -608,27 +589,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
-                       u32 *result)
-{
-       u32 seqno = i915_gem_next_request_seqno(ring);
-       int ret;
-
-       ret = intel_ring_begin(ring, 4);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, seqno);
-       intel_ring_emit(ring, MI_USER_INTERRUPT);
-       intel_ring_advance(ring);
-
-       *result = seqno;
-       return 0;
-}
-
 static u32
 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
 {
@@ -655,76 +615,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
        return pc->cpu_page[0];
 }
 
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
-       dev_priv->gt_irq_mask &= ~mask;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       POSTING_READ(GTIMR);
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
+
+       if (!dev->irq_enabled)
+               return false;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (ring->irq_refcount++ == 0) {
+               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
+       }
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+       return true;
 }
 
 static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
-       dev_priv->gt_irq_mask |= mask;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       POSTING_READ(GTIMR);
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (--ring->irq_refcount == 0) {
+               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
+       }
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
-       dev_priv->irq_mask &= ~mask;
-       I915_WRITE(IMR, dev_priv->irq_mask);
-       POSTING_READ(IMR);
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
+
+       if (!dev->irq_enabled)
+               return false;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (ring->irq_refcount++ == 0) {
+               dev_priv->irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE(IMR, dev_priv->irq_mask);
+               POSTING_READ(IMR);
+       }
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+       return true;
 }
 
 static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
-       dev_priv->irq_mask |= mask;
-       I915_WRITE(IMR, dev_priv->irq_mask);
-       POSTING_READ(IMR);
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (--ring->irq_refcount == 0) {
+               dev_priv->irq_mask |= ring->irq_enable_mask;
+               I915_WRITE(IMR, dev_priv->irq_mask);
+               POSTING_READ(IMR);
+       }
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
 
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock(&ring->irq_lock);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_irq(dev_priv,
-                                           GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
-               else
-                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+               dev_priv->irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE16(IMR, dev_priv->irq_mask);
+               POSTING_READ16(IMR);
        }
-       spin_unlock(&ring->irq_lock);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
 
-       spin_lock(&ring->irq_lock);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_irq(dev_priv,
-                                            GT_USER_INTERRUPT |
-                                            GT_PIPE_NOTIFY);
-               else
-                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+               dev_priv->irq_mask |= ring->irq_enable_mask;
+               I915_WRITE16(IMR, dev_priv->irq_mask);
+               POSTING_READ16(IMR);
        }
-       spin_unlock(&ring->irq_lock);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
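All of the get/put pairs above implement the same refcounted scheme: the
first getter unmasks the ring's interrupt source, the last putter masks
it again, and everything is serialized by dev_priv->irq_lock. A sketch of
the intended caller, along the lines of i915_wait_request (simplified,
not the exact code):

	if (ring->irq_get(ring)) {
		/* sleep until the seqno appears in the status page */
		wait_event(ring->irq_queue,
			   i915_seqno_passed(ring->get_seqno(ring), seqno));
		ring->irq_put(ring);
	} else {
		/* irqs unavailable: fall back to polling the seqno */
	}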
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +775,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
 {
        u32 seqno;
@@ -799,10 +798,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
 
        if (!dev->irq_enabled)
                return false;
@@ -812,120 +812,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
         * blt/bsd rings on ivb. */
        gen6_gt_force_wake_get(dev_priv);
 
-       spin_lock(&ring->irq_lock);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
-               ring->irq_mask &= ~rflag;
-               I915_WRITE_IMR(ring, ring->irq_mask);
-               ironlake_enable_irq(dev_priv, gflag);
+               I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
        }
-       spin_unlock(&ring->irq_lock);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned long flags;
 
-       spin_lock(&ring->irq_lock);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
-               ring->irq_mask |= rflag;
-               I915_WRITE_IMR(ring, ring->irq_mask);
-               ironlake_disable_irq(dev_priv, gflag);
+               I915_WRITE_IMR(ring, ~0);
+               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
        }
-       spin_unlock(&ring->irq_lock);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        gen6_gt_force_wake_put(dev_priv);
 }
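Two conventions are worth spelling out in the gen6 variant above. The
per-ring IMR is a mask register, so a set bit disables that interrupt:
~ring->irq_enable_mask unmasks only this ring's bit, and writing ~0 on
the final put masks everything again. And the forcewake get/put pair
brackets the whole window because, per the comment in gen6_ring_get_irq,
interrupts are not delivered reliably on Ivybridge's blt/bsd rings while
the GT is suspended; the forcewake calls are refcounted and must balance.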
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
-       struct drm_device *dev = ring->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       if (!dev->irq_enabled)
-               return false;
+       int ret;
 
-       spin_lock(&ring->irq_lock);
-       if (ring->irq_refcount++ == 0) {
-               if (IS_G4X(dev))
-                       i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-               else
-                       ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-       }
-       spin_unlock(&ring->irq_lock);
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
 
-       return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       struct drm_device *dev = ring->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       intel_ring_emit(ring,
+                       MI_BATCH_BUFFER_START |
+                       MI_BATCH_GTT |
+                       MI_BATCH_NON_SECURE_I965);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
 
-       spin_lock(&ring->irq_lock);
-       if (--ring->irq_refcount == 0) {
-               if (IS_G4X(dev))
-                       i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-               else
-                       ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-       }
-       spin_unlock(&ring->irq_lock);
+       return 0;
 }
 
 static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                               u32 offset, u32 len)
 {
        int ret;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring,
-                       MI_BATCH_BUFFER_START | (2 << 6) |
-                       MI_BATCH_NON_SECURE_I965);
-       intel_ring_emit(ring, offset);
+       intel_ring_emit(ring, MI_BATCH_BUFFER);
+       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+       intel_ring_emit(ring, offset + len - 8);
+       intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
        return 0;
 }
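Unlike MI_BATCH_BUFFER_START, the i830/845 MI_BATCH_BUFFER command
carries both a start and an end address, and the end operand points at
the batch's final 8 bytes, hence offset + len - 8. With hypothetical
numbers: a batch at 0x1000 with len 0x100 emits start
0x1000 | MI_BATCH_NON_SECURE and end 0x10f8.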
 
 static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
 {
-       struct drm_device *dev = ring->dev;
        int ret;
 
-       if (IS_I830(dev) || IS_845G(dev)) {
-               ret = intel_ring_begin(ring, 4);
-               if (ret)
-                       return ret;
-
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-               intel_ring_emit(ring, offset + len - 8);
-               intel_ring_emit(ring, 0);
-       } else {
-               ret = intel_ring_begin(ring, 2);
-               if (ret)
-                       return ret;
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
 
-               if (INTEL_INFO(dev)->gen >= 4) {
-                       intel_ring_emit(ring,
-                                       MI_BATCH_BUFFER_START | (2 << 6) |
-                                       MI_BATCH_NON_SECURE_I965);
-                       intel_ring_emit(ring, offset);
-               } else {
-                       intel_ring_emit(ring,
-                                       MI_BATCH_BUFFER_START | (2 << 6));
-                       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-               }
-       }
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_advance(ring);
 
        return 0;
@@ -933,7 +900,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;
 
        obj = ring->status_page.obj;
@@ -944,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
-
-       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;
 
@@ -972,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
        ring->status_page.gfx_addr = obj->gtt_offset;
        ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                goto err_unpin;
        }
        ring->status_page.obj = obj;
@@ -992,8 +954,8 @@ err:
        return ret;
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-                          struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+                                 struct intel_ring_buffer *ring)
 {
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1002,10 +964,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);
+       ring->size = 32 * PAGE_SIZE;
 
        init_waitqueue_head(&ring->irq_queue);
-       spin_lock_init(&ring->irq_lock);
-       ring->irq_mask = ~0;
 
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
@@ -1026,20 +987,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto err_unref;
 
-       ring->map.size = ring->size;
-       ring->map.offset = dev->agp->base + obj->gtt_offset;
-       ring->map.type = 0;
-       ring->map.flags = 0;
-       ring->map.mtrr = 0;
-
-       drm_core_ioremap_wc(&ring->map, dev);
-       if (ring->map.handle == NULL) {
+       ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+                                        ring->size);
+       if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }
 
-       ring->virtual_start = ring->map.handle;
        ret = ring->init(ring);
        if (ret)
                goto err_unmap;
@@ -1055,7 +1010,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
        return 0;
 
 err_unmap:
-       drm_core_ioremapfree(&ring->map, dev);
+       iounmap(ring->virtual_start);
 err_unpin:
        i915_gem_object_unpin(obj);
 err_unref:
@@ -1083,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
        I915_WRITE_CTL(ring, 0);
 
-       drm_core_ioremapfree(&ring->map, ring->dev);
+       iounmap(ring->virtual_start);
 
        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-       unsigned int *virt;
+       uint32_t __iomem *virt;
        int rem = ring->size - ring->tail;
 
        if (ring->space < rem) {
@@ -1106,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
                        return ret;
        }
 
-       virt = (unsigned int *)(ring->virtual_start + ring->tail);
-       rem /= 8;
-       while (rem--) {
-               *virt++ = MI_NOOP;
-               *virt++ = MI_NOOP;
-       }
+       virt = ring->virtual_start + ring->tail;
+       rem /= 4;
+       while (rem--)
+               iowrite32(MI_NOOP, virt++);
 
        ring->tail = 0;
        ring->space = ring_space(ring);
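Besides switching to dword granularity, the rewrite above fixes the
pointer type: virtual_start refers to write-combined io memory, so the
wrap path now goes through a uint32_t __iomem pointer and iowrite32()
rather than plain stores, which makes the access width explicit and
keeps sparse quiet. The amount written is unchanged; with 32 bytes left
before the wrap, the old loop wrote two MI_NOOPs four times and the new
one writes one MI_NOOP eight times.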
@@ -1132,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
 
-       ret = i915_wait_request(ring, seqno, true);
+       ret = i915_wait_request(ring, seqno);
 
        dev_priv->mm.interruptible = was_interruptible;
+       if (!ret)
+               i915_gem_retire_requests_ring(ring);
 
        return ret;
 }
@@ -1208,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
                return ret;
 
        trace_i915_ring_wait_begin(ring);
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               /* With GEM the hangcheck timer should kick us out of the loop,
-                * leaving it early runs the risk of corrupting GEM state (due
-                * to running on almost untested codepaths). But on resume
-                * timers don't work yet, so prevent a complete hang in that
-                * case by choosing an insanely large timeout. */
-               end = jiffies + 60 * HZ;
-       else
-               end = jiffies + 3 * HZ;
+       /* With GEM the hangcheck timer should kick us out of the loop,
+        * leaving it early runs the risk of corrupting GEM state (due
+        * to running on almost untested codepaths). But on resume
+        * timers don't work yet, so prevent a complete hang in that
+        * case by choosing an insanely large timeout. */
+       end = jiffies + 60 * HZ;
 
        do {
                ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1220,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
 {
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
        ring->tail &= ring->size - 1;
+       if (dev_priv->stop_rings & intel_ring_flag(ring))
+               return;
        ring->write_tail(ring, ring->tail);
 }
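The stop_rings check lets the hang-testing infrastructure simulate a
wedged GPU: while a ring's bit is set, tail writes are swallowed and the
hardware never sees the new commands. intel_ring_flag() is, by all
appearances, the trivial id-to-bit helper, along these lines:

	static inline u32 intel_ring_flag(struct intel_ring_buffer *ring)
	{
		return 1 << ring->id;	/* RCS = 0, VCS = 1, BCS = 2 */
	}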
 
-static const struct intel_ring_buffer render_ring = {
-       .name                   = "render ring",
-       .id                     = RCS,
-       .mmio_base              = RENDER_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_render_ring,
-       .write_tail             = ring_write_tail,
-       .flush                  = render_ring_flush,
-       .add_request            = render_ring_add_request,
-       .get_seqno              = ring_get_seqno,
-       .irq_get                = render_ring_get_irq,
-       .irq_put                = render_ring_put_irq,
-       .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
-       .cleanup                = render_ring_cleanup,
-       .sync_to                = render_ring_sync_to,
-       .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
-                                  MI_SEMAPHORE_SYNC_RV,
-                                  MI_SEMAPHORE_SYNC_RB},
-       .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
-       .name                   = "bsd ring",
-       .id                     = VCS,
-       .mmio_base              = BSD_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
-       .write_tail             = ring_write_tail,
-       .flush                  = bsd_ring_flush,
-       .add_request            = ring_add_request,
-       .get_seqno              = ring_get_seqno,
-       .irq_get                = bsd_ring_get_irq,
-       .irq_put                = bsd_ring_put_irq,
-       .dispatch_execbuffer    = ring_dispatch_execbuffer,
-};
-
 
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
@@ -1372,77 +1290,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_get_irq(ring,
-                                GT_USER_INTERRUPT,
-                                GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_put_irq(ring,
-                                GT_USER_INTERRUPT,
-                                GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_get_irq(ring,
-                                GT_GEN6_BSD_USER_INTERRUPT,
-                                GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_put_irq(ring,
-                                GT_GEN6_BSD_USER_INTERRUPT,
-                                GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
-       .name                   = "gen6 bsd ring",
-       .id                     = VCS,
-       .mmio_base              = GEN6_BSD_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
-       .write_tail             = gen6_bsd_ring_write_tail,
-       .flush                  = gen6_ring_flush,
-       .add_request            = gen6_add_request,
-       .get_seqno              = gen6_ring_get_seqno,
-       .irq_get                = gen6_bsd_ring_get_irq,
-       .irq_put                = gen6_bsd_ring_put_irq,
-       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
-       .sync_to                = gen6_bsd_ring_sync_to,
-       .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
-                                  MI_SEMAPHORE_SYNC_INVALID,
-                                  MI_SEMAPHORE_SYNC_VB},
-       .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_get_irq(ring,
-                                GT_BLT_USER_INTERRUPT,
-                                GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       gen6_ring_put_irq(ring,
-                         GT_BLT_USER_INTERRUPT,
-                         GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
 {
@@ -1464,42 +1313,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static const struct intel_ring_buffer gen6_blt_ring = {
-       .name                   = "blt ring",
-       .id                     = BCS,
-       .mmio_base              = BLT_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
-       .write_tail             = ring_write_tail,
-       .flush                  = blt_ring_flush,
-       .add_request            = gen6_add_request,
-       .get_seqno              = gen6_ring_get_seqno,
-       .irq_get                = blt_ring_get_irq,
-       .irq_put                = blt_ring_put_irq,
-       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
-       .sync_to                = gen6_blt_ring_sync_to,
-       .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
-                                  MI_SEMAPHORE_SYNC_BV,
-                                  MI_SEMAPHORE_SYNC_INVALID},
-       .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-       *ring = render_ring;
+       ring->name = "render ring";
+       ring->id = RCS;
+       ring->mmio_base = RENDER_RING_BASE;
+
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen6_render_ring_flush;
-               ring->irq_get = gen6_render_ring_get_irq;
-               ring->irq_put = gen6_render_ring_put_irq;
+               ring->irq_get = gen6_ring_get_irq;
+               ring->irq_put = gen6_ring_put_irq;
+               ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
+               ring->sync_to = gen6_ring_sync;
+               ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+               ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+               ring->signal_mbox[0] = GEN6_VRSYNC;
+               ring->signal_mbox[1] = GEN6_BRSYNC;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
+               ring->flush = gen4_render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
+               ring->irq_get = gen5_ring_get_irq;
+               ring->irq_put = gen5_ring_put_irq;
+               ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+       } else {
+               ring->add_request = i9xx_add_request;
+               if (INTEL_INFO(dev)->gen < 4)
+                       ring->flush = gen2_render_ring_flush;
+               else
+                       ring->flush = gen4_render_ring_flush;
+               ring->get_seqno = ring_get_seqno;
+               if (IS_GEN2(dev)) {
+                       ring->irq_get = i8xx_ring_get_irq;
+                       ring->irq_put = i8xx_ring_put_irq;
+               } else {
+                       ring->irq_get = i9xx_ring_get_irq;
+                       ring->irq_put = i9xx_ring_put_irq;
+               }
+               ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
+       ring->write_tail = ring_write_tail;
+       if (INTEL_INFO(dev)->gen >= 6)
+               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       else if (INTEL_INFO(dev)->gen >= 4)
+               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev) || IS_845G(dev))
+               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+       ring->init = init_render_ring;
+       ring->cleanup = render_ring_cleanup;
 
        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,15 +1384,41 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-       *ring = render_ring;
+       ring->name = "render ring";
+       ring->id = RCS;
+       ring->mmio_base = RENDER_RING_BASE;
+
        if (INTEL_INFO(dev)->gen >= 6) {
-               ring->add_request = gen6_add_request;
-               ring->irq_get = gen6_render_ring_get_irq;
-               ring->irq_put = gen6_render_ring_put_irq;
-       } else if (IS_GEN5(dev)) {
-               ring->add_request = pc_render_add_request;
-               ring->get_seqno = pc_render_get_seqno;
+               /* non-kms not supported on gen6+ */
+               return -ENODEV;
+       }
+
+       /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+        * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+        * the special gen5 functions. */
+       ring->add_request = i9xx_add_request;
+       if (INTEL_INFO(dev)->gen < 4)
+               ring->flush = gen2_render_ring_flush;
+       else
+               ring->flush = gen4_render_ring_flush;
+       ring->get_seqno = ring_get_seqno;
+       if (IS_GEN2(dev)) {
+               ring->irq_get = i8xx_ring_get_irq;
+               ring->irq_put = i8xx_ring_put_irq;
+       } else {
+               ring->irq_get = i9xx_ring_get_irq;
+               ring->irq_put = i9xx_ring_put_irq;
        }
+       ring->irq_enable_mask = I915_USER_INTERRUPT;
+       ring->write_tail = ring_write_tail;
+       if (INTEL_INFO(dev)->gen >= 4)
+               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev) || IS_845G(dev))
+               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+       ring->init = init_render_ring;
+       ring->cleanup = render_ring_cleanup;
 
        if (!I915_NEED_GFX_HWS(dev))
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;
 
-       ring->map.offset = start;
-       ring->map.size = size;
-       ring->map.type = 0;
-       ring->map.flags = 0;
-       ring->map.mtrr = 0;
-
-       drm_core_ioremap_wc(&ring->map, dev);
-       if (ring->map.handle == NULL) {
+       ring->virtual_start = ioremap_wc(start, size);
+       if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
 
-       ring->virtual_start = (void __force __iomem *)ring->map.handle;
        return 0;
 }
 
@@ -1559,10 +1448,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
-       if (IS_GEN6(dev) || IS_GEN7(dev))
-               *ring = gen6_bsd_ring;
-       else
-               *ring = bsd_ring;
+       ring->name = "bsd ring";
+       ring->id = VCS;
+
+       ring->write_tail = ring_write_tail;
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               ring->mmio_base = GEN6_BSD_RING_BASE;
+               /* gen6 bsd needs a special workaround for tail updates */
+               if (IS_GEN6(dev))
+                       ring->write_tail = gen6_bsd_ring_write_tail;
+               ring->flush = gen6_ring_flush;
+               ring->add_request = gen6_add_request;
+               ring->get_seqno = gen6_ring_get_seqno;
+               ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+               ring->irq_get = gen6_ring_get_irq;
+               ring->irq_put = gen6_ring_put_irq;
+               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+               ring->sync_to = gen6_ring_sync;
+               ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+               ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+               ring->signal_mbox[0] = GEN6_RVSYNC;
+               ring->signal_mbox[1] = GEN6_BVSYNC;
+       } else {
+               ring->mmio_base = BSD_RING_BASE;
+               ring->flush = bsd_ring_flush;
+               ring->add_request = i9xx_add_request;
+               ring->get_seqno = ring_get_seqno;
+               if (IS_GEN5(dev)) {
+                       ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+                       ring->irq_get = gen5_ring_get_irq;
+                       ring->irq_put = gen5_ring_put_irq;
+               } else {
+                       ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+                       ring->irq_get = i9xx_ring_get_irq;
+                       ring->irq_put = i9xx_ring_put_irq;
+               }
+               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+       }
+       ring->init = init_ring_common;
 
        return intel_init_ring_buffer(dev, ring);
 }
@@ -1572,7 +1497,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-       *ring = gen6_blt_ring;
+       ring->name = "blitter ring";
+       ring->id = BCS;
+
+       ring->mmio_base = BLT_RING_BASE;
+       ring->write_tail = ring_write_tail;
+       ring->flush = blt_ring_flush;
+       ring->add_request = gen6_add_request;
+       ring->get_seqno = gen6_ring_get_seqno;
+       ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+       ring->irq_get = gen6_ring_get_irq;
+       ring->irq_put = gen6_ring_put_irq;
+       ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       ring->sync_to = gen6_ring_sync;
+       ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+       ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+       ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->signal_mbox[0] = GEN6_RBSYNC;
+       ring->signal_mbox[1] = GEN6_VBSYNC;
+       ring->init = init_ring_common;
 
        return intel_init_ring_buffer(dev, ring);
 }
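For reference, the semaphore_register tables built up in these init
functions are indexed by the waiter's ring id, with the self-sync slot
left invalid:

	/* signaller \ waiter    RCS           VCS           BCS     */
	/* render  */  { SYNC_INVALID, SYNC_RV,      SYNC_RB      }
	/* bsd     */  { SYNC_VR,      SYNC_INVALID, SYNC_VB      }
	/* blitter */  { SYNC_BR,      SYNC_BV,      SYNC_INVALID }

(all MI_SEMAPHORE_SYNC_* values). This layout is what allows
gen6_ring_sync to look up signaller->semaphore_register[waiter->id]
instead of taking the mbox register as a parameter.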