drm/i915/ringbuffer: Make IRQ refcnting atomic
Author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 13 Dec 2010 16:54:50 +0000 (16:54 +0000)
Committer: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 14 Dec 2010 11:34:46 +0000 (11:34 +0000)
In order to enforce the correct memory barriers for irq get/put, we need
to perform the actual counting using atomic operations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 27fa2a1b26a56dfc5066248c74c257ad9672804d..726c2ccd674c5f934bc0ff20bcba979674af763e 100644 (file)
@@ -2000,17 +2000,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                trace_i915_gem_request_wait_begin(dev, seqno);
 
                ring->waiting_seqno = seqno;
-               ring->irq_get(ring);
-               if (interruptible)
-                       ret = wait_event_interruptible(ring->irq_queue,
-                               i915_seqno_passed(ring->get_seqno(ring), seqno)
-                               || atomic_read(&dev_priv->mm.wedged));
-               else
-                       wait_event(ring->irq_queue,
-                               i915_seqno_passed(ring->get_seqno(ring), seqno)
-                               || atomic_read(&dev_priv->mm.wedged));
+               ret = -ENODEV;
+               if (ring->irq_get(ring)) {
+                       if (interruptible)
+                               ret = wait_event_interruptible(ring->irq_queue,
+                                                              i915_seqno_passed(ring->get_seqno(ring), seqno)
+                                                              || atomic_read(&dev_priv->mm.wedged));
+                       else
+                               wait_event(ring->irq_queue,
+                                          i915_seqno_passed(ring->get_seqno(ring), seqno)
+                                          || atomic_read(&dev_priv->mm.wedged));
 
-               ring->irq_put(ring);
+                       ring->irq_put(ring);
+               }
                ring->waiting_seqno = 0;
 
                trace_i915_gem_request_wait_end(dev, seqno);
@@ -3157,14 +3159,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                 * generation is designed to be run atomically and so is
                 * lockless.
                 */
-               ring->irq_get(ring);
-               ret = wait_event_interruptible(ring->irq_queue,
-                                              i915_seqno_passed(ring->get_seqno(ring), seqno)
-                                              || atomic_read(&dev_priv->mm.wedged));
-               ring->irq_put(ring);
+               if (ring->irq_get(ring)) {
+                       ret = wait_event_interruptible(ring->irq_queue,
+                                                      i915_seqno_passed(ring->get_seqno(ring), seqno)
+                                                      || atomic_read(&dev_priv->mm.wedged));
+                       ring->irq_put(ring);
 
-               if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-                       ret = -EIO;
+                       if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+                               ret = -EIO;
+               }
        }
 
        if (ret == 0)
index 02e4dd82f75406749d1d3647634ddcc91a1baf6b..2ddb98b5c90fb94c053a5ecfbbb81c4dee26bd11 100644 (file)
@@ -1186,10 +1186,9 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-       if (dev_priv->trace_irq_seqno == 0)
-               ring->irq_get(ring);
-
-       dev_priv->trace_irq_seqno = seqno;
+       if (dev_priv->trace_irq_seqno == 0 &&
+           ring->irq_get(ring))
+               dev_priv->trace_irq_seqno = seqno;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1211,10 +1210,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-       ring->irq_get(ring);
-       DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
-                   READ_BREADCRUMB(dev_priv) >= irq_nr);
-       ring->irq_put(ring);
+       ret = -ENODEV;
+       if (ring->irq_get(ring)) {
+               DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+                           READ_BREADCRUMB(dev_priv) >= irq_nr);
+               ring->irq_put(ring);
+       }
 
        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
index 74b99718a1fbc29c8feac1929b7bdd722b6b36cc..a3fd993e0de0b6fe43d33ae4503edf124df8252e 100644 (file)
@@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring)
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void
+static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
 
-       if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+       if (!dev->irq_enabled)
+               return false;
+
+       if (atomic_inc_return(&ring->irq_refcount) == 1) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                unsigned long irqflags;
 
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv,
                                                     GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
+
+       return true;
 }
 
 static void
@@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
 
-       BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
-       if (dev->irq_enabled && --ring->irq_refcount == 0) {
+       if (atomic_dec_and_test(&ring->irq_refcount)) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                unsigned long irqflags;
 
@@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static void
+static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
        struct drm_device *dev = ring->dev;
 
-       if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+       if (!dev->irq_enabled)
+               return false;
+
+       if (atomic_inc_return(&ring->irq_refcount) == 1) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                unsigned long irqflags;
 
@@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
                ironlake_enable_graphics_irq(dev_priv, flag);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
+
+       return true;
 }
 
 static void
@@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
        struct drm_device *dev = ring->dev;
 
-       if (dev->irq_enabled && --ring->irq_refcount == 0) {
+       if (atomic_dec_and_test(&ring->irq_refcount)) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                unsigned long irqflags;
 
@@ -447,16 +454,15 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
        }
 }
 
-
-static void
+static bool
 bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-    ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+       return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 static void
 bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-    ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
+       ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 
 static int
@@ -846,16 +852,16 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static void
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-    ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+       return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-    ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+       ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -876,16 +882,16 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 
 /* Blitter support (SandyBridge+) */
 
-static void
+static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-    ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+       return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-    ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+       ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
 
index 9652e4600b5ec2eb047865690b0ae808b1bc074b..8e2e357ad6eec99e47ae826b1d4d80ff399b9809 100644 (file)
@@ -54,8 +54,8 @@ struct  intel_ring_buffer {
        u32             irq_seqno;              /* last seq seem at irq time */
        u32             waiting_seqno;
        u32             sync_seqno[I915_NUM_RINGS-1];
-       u32             irq_refcount;
-       void            (*irq_get)(struct intel_ring_buffer *ring);
+       atomic_t        irq_refcount;
+       bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
        void            (*irq_put)(struct intel_ring_buffer *ring);
 
        int             (*init)(struct intel_ring_buffer *ring);