drm/i915: Don't downclock whilst we have clients waiting for GPU results
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 27 Apr 2015 12:41:23 +0000 (13:41 +0100)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 21 May 2015 13:11:45 +0000 (15:11 +0200)
If we have clients stalled waiting for requests, ignore the GPU if it
signals that it should downclock due to low load. This helps prevent
the automatic timeout from causing extremely long-running batches to
take even longer.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_irq.c

index c68421a035cc62c06f482977a44fb4e997afe2fa..fece922718e255393768399d3d58b233b524cf64 100644 (file)
@@ -2282,6 +2282,18 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
        return 0;
 }
 
+/*
+ * Sum irq_refcount across all rings: the number of clients currently
+ * blocked waiting on a GPU user-interrupt. Used by debugfs to report
+ * "CPU waiting?" in i915_rps_boost_info.
+ *
+ * NOTE(review): reads ring->irq_refcount without locking — presumably a
+ * racy-but-harmless snapshot is acceptable for debugfs; confirm.
+ */
+static int count_irq_waiters(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *ring;
+       int count = 0;
+       int i;
+
+       for_each_ring(ring, i915, i)
+               count += ring->irq_refcount;
+
+       return count;
+}
+
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
@@ -2298,6 +2310,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        if (ret)
                goto unlock;
 
+       seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+       seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+       seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+       seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+                  intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                  intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                  intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
+                  intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
+                  intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;
index 557af8877a2e1c911ecdccc66f2d102aa1309e75..707e2ca8fbd847a43fc2ac4c8e4a01fdf36837c4 100644 (file)
@@ -1070,6 +1070,18 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
        return events;
 }
 
+/*
+ * Return true if any ring has at least one client waiting on a GPU
+ * interrupt (non-zero irq_refcount). gen6_pm_rps_work uses this to
+ * ignore a hardware down-clock request while clients are still stalled
+ * on results, so long-running batches are not slowed further.
+ */
+static bool any_waiters(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *ring;
+       int i;
+
+       for_each_ring(ring, dev_priv, i)
+               if (ring->irq_refcount)
+                       return true;
+
+       return false;
+}
+
 static void gen6_pm_rps_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
@@ -1114,6 +1126,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
                        new_delay = dev_priv->rps.efficient_freq;
                        adj = 0;
                }
+       } else if (any_waiters(dev_priv)) {
+               adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;