drm/i915: Limit mmio flip RPS boosts
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 27 Apr 2015 12:41:21 +0000 (13:41 +0100)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 21 May 2015 13:11:44 +0000 (15:11 +0200)
Since we will often pageflip to an active surface, we will often have to
wait for the surface to be written before issuing the flip. Also we are
likely to wait on that surface in plenty of time before the vblank.
Since we have a mechanism for boosting when a flip misses the expected
vblank, curtail the number of times we RPS boost when simply waiting for
mmioflip.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: s/rq/req/]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_pm.c

index 2ac71483cf12ee6dc3fc6bcc93b22d15bf61bbc5..21257faa3f8f2a78236fee31243ef8db9eaae2da 100644 (file)
@@ -2312,6 +2312,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                rcu_read_unlock();
        }
        seq_printf(m, "Semaphore boosts: %d\n", dev_priv->rps.semaphores.rps_boosts);
+       seq_printf(m, "MMIO flip boosts: %d\n", dev_priv->rps.mmioflips.rps_boosts);
        seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
index 2c44ca752e36c86447532ad40de5a192b5e3c154..64d632941e068f64ea532364c4ced6eb52234ad6 100644 (file)
@@ -1087,6 +1087,7 @@ struct intel_gen6_power_mgmt {
        unsigned boosts;
 
        struct drm_i915_file_private semaphores;
+       struct drm_i915_file_private mmioflips;
 
        /* manual wa residency calculations */
        struct intel_rps_ei up_ei, down_ei;
index 53ae5978491dc67165084dd75402a6e035f207ee..048565572764472a7fbf2c99fc5805ed759f8a3b 100644 (file)
@@ -10788,7 +10788,8 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
        if (mmio_flip->req)
                WARN_ON(__i915_wait_request(mmio_flip->req,
                                            mmio_flip->crtc->reset_counter,
-                                           false, NULL, NULL));
+                                           false, NULL,
+                                           &mmio_flip->i915->rps.mmioflips));
 
        intel_do_mmio_flip(mmio_flip->crtc);
 
@@ -10809,6 +10810,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
        if (mmio_flip == NULL)
                return -ENOMEM;
 
+       mmio_flip->i915 = to_i915(dev);
        mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
        mmio_flip->crtc = to_intel_crtc(crtc);
 
index c3c42ead4b46380714b7f1dfbfc40b50e698a81b..5afe1fe2bd6e2110344f12b08eecdfa845c08331 100644 (file)
@@ -460,6 +460,7 @@ struct intel_pipe_wm {
 
 struct intel_mmio_flip {
        struct work_struct work;
+       struct drm_i915_private *i915;
        struct drm_i915_gem_request *req;
        struct intel_crtc *crtc;
 };
index da7120ddb485e5aa7fc8740827b5cd61f70d1430..48e86255b7c836d52b35a0b5bd2172a643a34981 100644 (file)
@@ -6885,6 +6885,7 @@ void intel_pm_setup(struct drm_device *dev)
                          intel_gen6_powersave_work);
        INIT_LIST_HEAD(&dev_priv->rps.clients);
        INIT_LIST_HEAD(&dev_priv->rps.semaphores.rps_boost);
+       INIT_LIST_HEAD(&dev_priv->rps.mmioflips.rps_boost);
 
        dev_priv->pm.suspended = false;
 }