drm/i915: introduce intel_ring_buffer structure (V2)
author     Zou Nan hai <nanhai.zou@intel.com>     Fri, 21 May 2010 01:08:55 +0000 (09:08 +0800)
committer  Eric Anholt <eric@anholt.net>          Wed, 26 May 2010 20:24:49 +0000 (13:24 -0700)
Introduces a more complete intel_ring_buffer structure with callbacks
for setup and management of a particular ringbuffer, and converts the
render ring buffer consumers to use it.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
[anholt: Fixed up whitespace fail and rebased against prep patches]
Signed-off-by: Eric Anholt <eric@anholt.net>
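
The structure itself is declared in the new drivers/gpu/drm/i915/intel_ringbuffer.h, whose hunk is not reproduced in this excerpt. As a reading aid, here is a rough sketch reconstructed purely from the fields and callbacks the hunks below exercise; the member names match the call sites, but the ordering, the nested struct layout, and anything not referenced in this diff are assumptions rather than the authoritative definition:

/* Illustrative sketch only -- see intel_ringbuffer.h in this commit for
 * the real definition.  Relies on the usual DRM/i915 types (drmP.h). */
struct intel_ring_buffer {
	const char		*name;
	struct {			/* per-ring MMIO register offsets */
		u32 ctl;
		u32 head;
		u32 tail;
		u32 start;
	} regs;
	unsigned int		ring_flag;	/* e.g. I915_EXEC_RENDER */
	unsigned long		size;		/* ring size in bytes */
	unsigned int		alignment;	/* pin alignment for the ring BO */
	void			*virtual_start;	/* CPU mapping of the ring */
	struct drm_device	*dev;
	struct drm_gem_object	*gem_object;	/* replaces ring_obj */

	unsigned int		head;
	unsigned int		tail;
	int			space;
	drm_local_map_t		map;
	u32			next_seqno;
	int			user_irq_refcount;

	struct {			/* replaces hw_status_page/status_gfx_addr/hws_obj */
		struct drm_gem_object	*obj;
		u32			gfx_addr;
		void			*page_addr;
	} status_page;

	/* callbacks wired up for the render ring in intel_ringbuffer.c */
	int		(*init)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	unsigned int	(*get_head)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	unsigned int	(*get_tail)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	unsigned int	(*get_active_head)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*advance_ring)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*flush)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				u32 invalidate_domains,
				u32 flush_domains);
	u32		(*add_request)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_file *file_priv,
				u32 flush_domains);
	u32		(*get_gem_seqno)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*user_irq_get)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*setup_status_page)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_i915_gem_execbuffer2 *exec,
				struct drm_clip_rect *cliprects,
				uint64_t exec_offset);

	struct list_head	active_list;
	struct list_head	request_list;
};

With this in place, callers no longer touch the PRB0_* registers or dev_priv->hw_status_page directly: BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() funnel through intel_ring_begin()/intel_ring_emit()/intel_ring_advance(), and ring-specific behaviour (reset, seqno emission, user IRQs, execbuffer dispatch) goes through the callbacks above, so the helpers in intel_ringbuffer.c operate on any struct intel_ring_buffer rather than on the render ring specifically.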
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h [new file with mode: 0644]
include/drm/i915_drm.h

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 488175c70c7d065a338fecff1d67b6cf0dce2e1b..4fddf094deb2eccf89dceb899fe81076cdc3944f 100644
@@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
        u8 *virt;
        uint32_t *ptr, off;
 
-       if (!dev_priv->render_ring.ring_obj) {
+       if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
                return 0;
        }
 
        virt = dev_priv->render_ring.virtual_start;
 
-       for (off = 0; off < dev_priv->render_ring.Size; off += 4) {
+       for (off = 0; off < dev_priv->render_ring.size; off += 4) {
                ptr = (uint32_t *)(virt + off);
                seq_printf(m, "%08x :  %08x\n", off, *ptr);
        }
@@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 
        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
-       seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.Size);
+       seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
        seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
        return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6de7eace4319a8e33a4db16695ccadd70eb9f55f..2541428b2fe587b3fe40c19ce03d67c418d3c5cd 100644
@@ -40,7 +40,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -56,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+       dev_priv->render_ring.status_page.page_addr
+               = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+       memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
        if (IS_I965G(dev))
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -95,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring);
+       struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
        /*
         * We should never lose context on the ring with modesetting
@@ -108,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
-               ring->space += ring->Size;
+               ring->space += ring->size;
 
        if (!dev->primary->master)
                return;
@@ -128,12 +128,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
-       if (dev_priv->render_ring.virtual_start) {
-               drm_core_ioremapfree(&dev_priv->render_ring.map, dev);
-               dev_priv->render_ring.virtual_start = NULL;
-               dev_priv->render_ring.map.handle = NULL;
-               dev_priv->render_ring.map.size = 0;
-       }
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 
        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
@@ -156,14 +151,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        }
 
        if (init->ring_size != 0) {
-               if (dev_priv->render_ring.ring_obj != NULL) {
+               if (dev_priv->render_ring.gem_object != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }
 
-               dev_priv->render_ring.Size = init->ring_size;
+               dev_priv->render_ring.size = init->ring_size;
 
                dev_priv->render_ring.map.offset = init->ring_start;
                dev_priv->render_ring.map.size = init->ring_size;
@@ -201,26 +196,29 @@ static int i915_dma_resume(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+       struct intel_ring_buffer *ring;
        DRM_DEBUG_DRIVER("%s\n", __func__);
 
-       if (dev_priv->render_ring.map.handle == NULL) {
+       ring = &dev_priv->render_ring;
+
+       if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
 
        /* Program Hardware Status Page */
-       if (!dev_priv->hw_status_page) {
+       if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
-                               dev_priv->hw_status_page);
-
-       if (dev_priv->status_gfx_addr != 0)
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+                               ring->status_page.page_addr);
+       if (ring->status_page.gfx_addr != 0)
+               ring->setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
        return 0;
@@ -330,9 +328,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
-       RING_LOCALS;
 
-       if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.Size - 8)
+       if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
                return -EINVAL;
 
        BEGIN_LP_RING((dwords+1)&~1);
@@ -365,9 +362,7 @@ i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box = boxes[i];
-       RING_LOCALS;
 
        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -404,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       RING_LOCALS;
 
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -458,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i = 0, count;
-       RING_LOCALS;
 
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
@@ -510,7 +502,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
-       RING_LOCALS;
 
        if (!master_priv->sarea_priv)
                return -EINVAL;
@@ -563,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        i915_kernel_lost_context(dev);
-       return i915_wait_ring(dev, dev_priv->render_ring.Size - 8, __func__);
+       return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+                                     dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -805,6 +797,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
+       struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;
@@ -821,7 +814,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
-       dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+       ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
@@ -837,10 +830,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
                                " G33 hw status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->hws_map.handle;
+       ring->status_page.page_addr = dev_priv->hws_map.handle;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
                                dev_priv->status_gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
@@ -1639,7 +1632,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        spin_lock_init(&dev_priv->user_irq_lock);
        spin_lock_init(&dev_priv->error_lock);
-       dev_priv->user_irq_refcount = 0;
        dev_priv->trace_irq_seqno = 0;
 
        ret = drm_vblank_init(dev, I915_NUM_PIPE);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a1814f65fdb4123bd92e9fb5c95d1aeb9048c4e4..c57c54f403da8fd70f2c4b86e5acb1a3933664a1 100644
@@ -388,33 +388,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-           !dev_priv->mm.suspended) {
-               drm_i915_ring_buffer_t *ring = &dev_priv->render_ring;
-               struct drm_gem_object *obj = ring->ring_obj;
-               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+                       !dev_priv->mm.suspended) {
+               struct intel_ring_buffer *ring = &dev_priv->render_ring;
                dev_priv->mm.suspended = 0;
-
-               /* Stop the ring if it's running. */
-               I915_WRITE(PRB0_CTL, 0);
-               I915_WRITE(PRB0_TAIL, 0);
-               I915_WRITE(PRB0_HEAD, 0);
-
-               /* Initialize the ring. */
-               I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-               I915_WRITE(PRB0_CTL,
-                          ((obj->size - 4096) & RING_NR_PAGES) |
-                          RING_NO_REPORT |
-                          RING_VALID);
-               if (!drm_core_check_feature(dev, DRIVER_MODESET))
-                       i915_kernel_lost_context(dev);
-               else {
-                       ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-                       ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-                       ring->space = ring->head - (ring->tail + 8);
-                       if (ring->space < 0)
-                               ring->space += ring->Size;
-               }
-
+               ring->init(dev, ring);
                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a39440cf1deed6653b3898494322eee9cb248588..6bb7933d49dc45109ef80928c8d4d08d64a2dd5b 100644
@@ -31,8 +31,8 @@
 #define _I915_DRV_H_
 
 #include "i915_reg.h"
-#include "i915_drm.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
@@ -92,16 +92,6 @@ struct drm_i915_gem_phys_object {
        struct drm_gem_object *cur_obj;
 };
 
-typedef struct _drm_i915_ring_buffer {
-       unsigned long Size;
-       u8 *virtual_start;
-       int head;
-       int tail;
-       int space;
-       drm_local_map_t map;
-       struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
 struct mem_block {
        struct mem_block *next;
        struct mem_block *prev;
@@ -244,7 +234,7 @@ typedef struct drm_i915_private {
        void __iomem *regs;
 
        struct pci_dev *bridge_dev;
-       drm_i915_ring_buffer_t render_ring;
+       struct intel_ring_buffer render_ring;
 
        drm_dma_handle_t *status_page_dmah;
        void *hw_status_page;
@@ -270,8 +260,6 @@ typedef struct drm_i915_private {
        atomic_t irq_received;
        /** Protects user_irq_refcount and irq_mask_reg */
        spinlock_t user_irq_lock;
-       /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-       int user_irq_refcount;
        u32 trace_irq_seqno;
        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 irq_mask_reg;
@@ -832,9 +820,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -853,8 +839,10 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
 extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
-void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+               u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+               u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -962,8 +950,6 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
-int i915_gem_init_pipe_control(struct drm_device *dev);
-void i915_gem_cleanup_pipe_control(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1014,16 +1000,6 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return;
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
-/* intel_ringbuffer.c */
-extern void i915_gem_flush(struct drm_device *dev,
-                          uint32_t invalidate_domains,
-                          uint32_t flush_domains);
-extern int i915_dispatch_gem_execbuffer(struct drm_device *dev,
-                                       struct drm_i915_gem_execbuffer2 *exec,
-                                       struct drm_clip_rect *cliprects,
-                                       uint64_t exec_offset);
-extern uint32_t i915_ring_add_request(struct drm_device *dev);
-
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1044,7 +1020,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {                        \
-       if (((drm_i915_private_t *)dev->dev_private)->render_ring.ring_obj == NULL) \
+       if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+                       == NULL)                                        \
                LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
 } while (0)
 
@@ -1060,32 +1037,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 #define I915_VERBOSE 0
 
-#define RING_LOCALS    volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do {                                          \
-       int bytes__ = 4*(n);                                            \
-       if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));        \
-       /* a wrap must occur between instructions so pad beforehand */  \
-       if (unlikely (dev_priv->render_ring.tail + bytes__ > dev_priv->render_ring.Size)) \
-               i915_wrap_ring(dev);                                    \
-       if (unlikely (dev_priv->render_ring.space < bytes__))                   \
-               i915_wait_ring(dev, bytes__, __func__);                 \
-       ring_virt__ = (unsigned int *)                                  \
-               (dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail);     \
-       dev_priv->render_ring.tail += bytes__;                                  \
-       dev_priv->render_ring.tail &= dev_priv->render_ring.Size - 1;                   \
-       dev_priv->render_ring.space -= bytes__;                         \
+#define BEGIN_LP_RING(n)  do { \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       if (I915_VERBOSE)                                               \
+               DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));           \
+       intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));           \
 } while (0)
 
-#define OUT_RING(n) do {                                               \
-       if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
-       *ring_virt__++ = (n);                                           \
+
+#define OUT_RING(x) do {                                               \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       if (I915_VERBOSE)                                               \
+               DRM_DEBUG("   OUT_RING %x\n", (int)(x));                \
+       intel_ring_emit(dev, &dev_priv->render_ring, x);                \
 } while (0)
 
 #define ADVANCE_LP_RING() do {                                         \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
        if (I915_VERBOSE)                                               \
-               DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->render_ring.tail);  \
-       I915_WRITE(PRB0_TAIL, dev_priv->render_ring.tail);                      \
+               DRM_DEBUG("ADVANCE_LP_RING %x\n",                       \
+                               dev_priv->render_ring.tail);            \
+       intel_ring_advance(dev, &dev_priv->render_ring);                \
 } while(0)
 
 /**
@@ -1103,14 +1075,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
+                       (dev_priv->render_ring.status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX             0x20
 #define I915_BREADCRUMB_INDEX          0x21
 
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
 #define INTEL_INFO(dev)        (((struct drm_i915_private *) (dev)->dev_private)->info)
 
 #define IS_I830(dev)           ((dev)->pci_device == 0x3577)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95dbe5628a25caf63a7d2d188548698c6ea9da12..58b6e814fae198d6cf74b5008d9dea98625d3bce 100644
@@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                }
        }
 }
+
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 uint32_t flush_domains)
@@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (request == NULL)
                return 0;
 
-       seqno = i915_ring_add_request(dev);
+       seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
+                                                 file_priv, flush_domains);
 
        DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 static uint32_t
 i915_retire_commands(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
-       RING_LOCALS;
 
        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
@@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+       struct intel_ring_buffer *ring = &(dev_priv->render_ring);
+       if (!ring->status_page.page_addr
+                       || list_empty(&dev_priv->mm.request_list))
                return;
 
        seqno = i915_get_gem_seqno(dev);
@@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-               i915_user_irq_put(dev);
+
+               ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
 }
@@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
        u32 ier;
        int ret = 0;
 
+       struct intel_ring_buffer *ring = &dev_priv->render_ring;
        BUG_ON(seqno == 0);
 
        if (atomic_read(&dev_priv->mm.wedged))
@@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
                trace_i915_gem_request_wait_begin(dev, seqno);
 
                dev_priv->mm.waiting_gem_seqno = seqno;
-               i915_user_irq_get(dev);
+               ring->user_irq_get(dev, ring);
                if (interruptible)
                        ret = wait_event_interruptible(dev_priv->irq_queue,
                                i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
@@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
                                i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
                                atomic_read(&dev_priv->mm.wedged));
 
-               i915_user_irq_put(dev);
+               ring->user_irq_put(dev, ring);
                dev_priv->mm.waiting_gem_seqno = 0;
 
                trace_i915_gem_request_wait_end(dev, seqno);
@@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 }
 
 
+static void
+i915_gem_flush(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+                       invalidate_domains,
+                       flush_domains);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
+                                                           &dev_priv->render_ring,
+                                                           args,
+                                                           cliprects,
+                                                           exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) {
+       if (dev_priv->mm.suspended ||
+                       dev_priv->render_ring.gem_object == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev)
  * 965+ support PIPE_CONTROL commands, which provide finer grained control
  * over cache flushing.
  */
-int
+static int
 i915_gem_init_pipe_control(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4459,7 +4481,8 @@ err:
        return ret;
 }
 
-void
+
+static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4476,6 +4499,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
        dev_priv->seqno_page = NULL;
 }
 
+int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+       dev_priv->render_ring = render_ring;
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->render_ring.status_page.page_addr
+                       = dev_priv->status_page_dmah->vaddr;
+               memset(dev_priv->render_ring.status_page.page_addr,
+                               0, PAGE_SIZE);
+       }
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       return ret;
+       }
+       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       return ret;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
+}
+
 int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dd91c97de968574d596a72f22bd27ed69e111473..e07c643c8365aaf2c6bf31e970943d1bc5b3cbd0 100644
@@ -545,7 +545,8 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
        }
 
        if (bbaddr == 0) {
-               ring = (u32 *)(dev_priv->render_ring.virtual_start + dev_priv->render_ring.Size);
+               ring = (u32 *)(dev_priv->render_ring.virtual_start
+                               + dev_priv->render_ring.size);
                while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
                        bbaddr = i915_get_bbaddr(dev, ring);
                        if (bbaddr)
@@ -639,7 +640,8 @@ static void i915_capture_error_state(struct drm_device *dev)
        error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
 
        /* Record the ringbuffer */
-       error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.ring_obj);
+       error->ringbuffer = i915_error_object_create(dev,
+                       dev_priv->render_ring.gem_object);
 
        /* Record buffers on the active list. */
        error->active_bo = NULL;
@@ -984,7 +986,6 @@ static int i915_emit_irq(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       RING_LOCALS;
 
        i915_kernel_lost_context(dev);
 
@@ -1009,9 +1010,10 @@ static int i915_emit_irq(struct drm_device * dev)
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
        if (dev_priv->trace_irq_seqno == 0)
-               i915_user_irq_get(dev);
+               render_ring->user_irq_get(dev, render_ring);
 
        dev_priv->trace_irq_seqno = seqno;
 }
@@ -1021,6 +1023,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
@@ -1034,10 +1037,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-       i915_user_irq_get(dev);
+       render_ring->user_irq_get(dev, render_ring);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
-       i915_user_irq_put(dev);
+       render_ring->user_irq_put(dev, render_ring);
 
        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84cacfd92951ce866257699176ef37a21e5..b867f3c78408259a47db246e7a209315561037c9 100644
@@ -4629,7 +4629,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        unsigned long flags;
        int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
        int ret, pipesrc;
-       RING_LOCALS;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b06eb6ea08c2c67e57e8b62fe6989cd4e2d..93da83782e5ed50e1cd4e15dbd23ed0666e604a4 100644
@@ -211,9 +211,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       RING_LOCALS;
 
        BUG_ON(overlay->active);
 
@@ -248,7 +246,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
         drm_i915_private_t *dev_priv = dev->dev_private;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
-       RING_LOCALS;
 
        BUG_ON(!overlay->active);
 
@@ -274,7 +271,6 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
         drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        u32 tmp;
-       RING_LOCALS;
 
        if (overlay->last_flip_req != 0) {
                ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
@@ -314,9 +310,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
        u32 flip_addr = overlay->flip_addr;
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       RING_LOCALS;
 
        BUG_ON(!overlay->active);
 
@@ -390,11 +384,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                                         int interruptible)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        u32 flip_addr;
        int ret;
-       RING_LOCALS;
 
        if (overlay->hw_wedged == HW_WEDGED)
                return -EIO;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 06058ddb4eede7e0365281a52db39fd4194ec85e..5715c4d8cce9c7f8192571ec90b729137cd913f1 100644
 
 #include "drmP.h"
 #include "drm.h"
-#include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_drm.h"
 #include "i915_trace.h"
-#include "intel_drv.h"
 
-void
-i915_gem_flush(struct drm_device *dev,
-              uint32_t invalidate_domains,
-              uint32_t flush_domains)
+static void
+render_ring_flush(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               u32     invalidate_domains,
+               u32     flush_domains)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t cmd;
-       RING_LOCALS;
-
 #if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
 #endif
-       trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+       u32 cmd;
+       trace_i915_gem_request_flush(dev, ring->next_seqno,
                                     invalidate_domains, flush_domains);
 
-       if (flush_domains & I915_GEM_DOMAIN_CPU)
-               drm_agp_chipset_flush(dev);
-
        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
@@ -100,19 +94,130 @@ i915_gem_flush(struct drm_device *dev,
 #if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-               BEGIN_LP_RING(2);
-               OUT_RING(cmd);
-               OUT_RING(MI_NOOP);
-               ADVANCE_LP_RING();
+               intel_ring_begin(dev, ring, 8);
+               intel_ring_emit(dev, ring, cmd);
+               intel_ring_emit(dev, ring, MI_NOOP);
+               intel_ring_advance(dev, ring);
        }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
 
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
 }
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+       return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 head;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       obj_priv = to_intel_bo(ring->gem_object);
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(ring->regs.ctl, 0);
+       I915_WRITE(ring->regs.head, 0);
+       I915_WRITE(ring->regs.tail, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+       head = ring->get_head(dev, ring);
+
+       /* G45 ring initialization fails to reset head to zero */
+       if (head != 0) {
+               DRM_ERROR("%s head not reset to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(ring->regs.ctl),
+                               I915_READ(ring->regs.head),
+                               I915_READ(ring->regs.tail),
+                               I915_READ(ring->regs.start));
+
+               I915_WRITE(ring->regs.head, 0);
+
+               DRM_ERROR("%s head forced to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(ring->regs.ctl),
+                               I915_READ(ring->regs.head),
+                               I915_READ(ring->regs.tail),
+                               I915_READ(ring->regs.start));
+       }
+
+       I915_WRITE(ring->regs.ctl,
+                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_NO_REPORT | RING_VALID);
+
+       head = I915_READ(ring->regs.head) & HEAD_ADDR;
+       /* If the head is still not zero, the ring is dead */
+       if (head != 0) {
+               DRM_ERROR("%s initialization failed "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(ring->regs.ctl),
+                               I915_READ(ring->regs.head),
+                               I915_READ(ring->regs.tail),
+                               I915_READ(ring->regs.start));
+               return -EIO;
+       }
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_kernel_lost_context(dev);
+       else {
+               ring->head = ring->get_head(dev, ring);
+               ring->tail = ring->get_tail(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+       }
+       return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret = init_ring_common(dev, ring);
+       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+               I915_WRITE(MI_MODE,
+                               (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       }
+       return ret;
+}
+
 #define PIPE_CONTROL_FLUSH(addr)                                       \
+do {                                                                   \
        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
                 PIPE_CONTROL_DEPTH_STALL);                             \
        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
        OUT_RING(0);                                                    \
        OUT_RING(0);                                                    \
+} while (0)
 
 /**
  * Creates a new sequence number, emitting a write of it to the status page
@@ -122,21 +227,15 @@ i915_gem_flush(struct drm_device *dev,
  *
  * Returned sequence numbers are nonzero on success.
  */
-uint32_t
-i915_ring_add_request(struct drm_device *dev)
+static u32
+render_ring_add_request(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_file *file_priv,
+               u32 flush_domains)
 {
+       u32 seqno;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t seqno;
-       RING_LOCALS;
-
-       /* Grab the seqno we're going to make this request be, and bump the
-        * next (skipping 0 so it can be the reserved no-seqno value).
-        */
-       seqno = dev_priv->mm.next_gem_seqno;
-       dev_priv->mm.next_gem_seqno++;
-       if (dev_priv->mm.next_gem_seqno == 0)
-               dev_priv->mm.next_gem_seqno++;
-
+       seqno = intel_ring_get_seqno(dev, ring);
        if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -181,13 +280,26 @@ i915_ring_add_request(struct drm_device *dev)
        return seqno;
 }
 
-void i915_user_irq_get(struct drm_device *dev)
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       if (HAS_PIPE_CONTROL(dev))
+               return ((volatile u32 *)(dev_priv->seqno_page))[0];
+       else
+               return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
+       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
@@ -196,14 +308,16 @@ void i915_user_irq_get(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-void i915_user_irq_put(struct drm_device *dev)
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-       if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
@@ -212,20 +326,31 @@ void i915_user_irq_put(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-                             struct drm_i915_gem_execbuffer2 *exec,
-                             struct drm_clip_rect *cliprects,
-                             uint64_t exec_offset)
+static void render_setup_status_page(struct drm_device *dev,
+       struct  intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (IS_GEN6(dev)) {
+               I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+               I915_READ(HWS_PGA_GEN6); /* posting read */
+       } else {
+               I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+               I915_READ(HWS_PGA); /* posting read */
+       }
+
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_i915_gem_execbuffer2 *exec,
+               struct drm_clip_rect *cliprects,
+               uint64_t exec_offset)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;
-       RING_LOCALS;
-
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;
 
@@ -242,74 +367,61 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
                }
 
                if (IS_I830(dev) || IS_845G(dev)) {
-                       BEGIN_LP_RING(4);
-                       OUT_RING(MI_BATCH_BUFFER);
-                       OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       OUT_RING(exec_start + exec_len - 4);
-                       OUT_RING(0);
-                       ADVANCE_LP_RING();
+                       intel_ring_begin(dev, ring, 4);
+                       intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+                       intel_ring_emit(dev, ring,
+                                       exec_start | MI_BATCH_NON_SECURE);
+                       intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+                       intel_ring_emit(dev, ring, 0);
                } else {
-                       BEGIN_LP_RING(2);
+                       intel_ring_begin(dev, ring, 4);
                        if (IS_I965G(dev)) {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6) |
-                                        MI_BATCH_NON_SECURE_I965);
-                               OUT_RING(exec_start);
+                               intel_ring_emit(dev, ring,
+                                               MI_BATCH_BUFFER_START | (2 << 6)
+                                               | MI_BATCH_NON_SECURE_I965);
+                               intel_ring_emit(dev, ring, exec_start);
                        } else {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6));
-                               OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+                               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+                                               | (2 << 6));
+                               intel_ring_emit(dev, ring, exec_start |
+                                               MI_BATCH_NON_SECURE);
                        }
-                       ADVANCE_LP_RING();
                }
+               intel_ring_advance(dev, ring);
        }
 
        /* XXX breadcrumb */
        return 0;
 }
 
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
+static void cleanup_status_page(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
 
-       if (dev_priv->hws_obj == NULL)
+       obj = ring->status_page.obj;
+       if (obj == NULL)
                return;
-
-       obj = dev_priv->hws_obj;
        obj_priv = to_intel_bo(obj);
 
        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
-       dev_priv->hws_obj = NULL;
+       ring->status_page.obj = NULL;
 
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-       dev_priv->hw_status_page = NULL;
-
-       if (HAS_PIPE_CONTROL(dev))
-               i915_gem_cleanup_pipe_control(dev);
-
-       /* Write high address into HWS_PGA when disabling. */
-       I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
-static int
-i915_gem_init_hws(struct drm_device *dev)
+static int init_status_page(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       /* If we need a physical address for the status page, it's already
-        * initialized at driver load time.
-        */
-       if (!I915_NEED_GFX_HWS(dev))
-               return 0;
-
        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
@@ -321,36 +433,21 @@ i915_gem_init_hws(struct drm_device *dev)
 
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
-               drm_gem_object_unreference(obj);
                goto err_unref;
        }
 
-       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-       if (dev_priv->hw_status_page == NULL) {
-               DRM_ERROR("Failed to map status page.\n");
+       ring->status_page.gfx_addr = obj_priv->gtt_offset;
+       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               ret = -EINVAL;
                goto err_unpin;
        }
+       ring->status_page.obj = obj;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-       if (HAS_PIPE_CONTROL(dev)) {
-               ret = i915_gem_init_pipe_control(dev);
-               if (ret)
-                       goto err_unpin;
-       }
-
-       dev_priv->hws_obj = obj;
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       if (IS_GEN6(dev)) {
-               I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA_GEN6); /* posting read */
-       } else {
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA); /* posting read */
-       }
-       DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+       ring->setup_status_page(dev, ring);
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+                       ring->name, ring->status_page.gfx_addr);
 
        return 0;
 
@@ -359,43 +456,42 @@ err_unpin:
 err_unref:
        drm_gem_object_unreference(obj);
 err:
-       return 0;
+       return ret;
 }
 
-int
-i915_gem_init_ringbuffer(struct drm_device *dev)
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       drm_i915_ring_buffer_t *ring = &dev_priv->render_ring;
        int ret;
-       u32 head;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       ring->dev = dev;
 
-       ret = i915_gem_init_hws(dev);
-       if (ret != 0)
-               return ret;
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(dev, ring);
+               if (ret)
+                       return ret;
+       }
 
-       obj = i915_gem_alloc_object(dev, 128 * 1024);
+       obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
-               i915_gem_cleanup_hws(dev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto cleanup;
        }
-       obj_priv = to_intel_bo(obj);
 
-       ret = i915_gem_object_pin(obj, 4096);
+       ring->gem_object = obj;
+
+       ret = i915_gem_object_pin(obj, ring->alignment);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return ret;
+               goto cleanup;
        }
 
-       /* Set up the kernel mapping for the ring. */
-       ring->Size = obj->size;
-
+       obj_priv = to_intel_bo(obj);
+       ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
-       ring->map.size = obj->size;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;
@@ -403,143 +499,85 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
-               memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring));
                i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return -EINVAL;
-       }
-       ring->ring_obj = obj;
-       ring->virtual_start = ring->map.handle;
-
-       /* Stop the ring if it's running. */
-       I915_WRITE(PRB0_CTL, 0);
-       I915_WRITE(PRB0_TAIL, 0);
-       I915_WRITE(PRB0_HEAD, 0);
-
-       /* Initialize the ring. */
-       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
-               DRM_ERROR("Ring head not reset to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               I915_WRITE(PRB0_HEAD, 0);
-
-               DRM_ERROR("Ring head forced to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
+               ret = -EINVAL;
+               goto cleanup;
        }
 
-       I915_WRITE(PRB0_CTL,
-                  ((obj->size - 4096) & RING_NR_PAGES) |
-                  RING_NO_REPORT |
-                  RING_VALID);
-
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* If the head is still not zero, the ring is dead */
-       if (head != 0) {
-               DRM_ERROR("Ring initialization failed "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               return -EIO;
+       ring->virtual_start = ring->map.handle;
+       ret = ring->init(dev, ring);
+       if (ret != 0) {
+               intel_cleanup_ring_buffer(dev, ring);
+               return ret;
        }
 
-       /* Update our cache of the ring state */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+               ring->head = ring->get_head(dev, ring);
+               ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
-                       ring->space += ring->Size;
+                       ring->space += ring->size;
        }
-
-       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                          (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
-       }
-
-       return 0;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       return ret;
+cleanup:
+       cleanup_status_page(dev, ring);
+       return ret;
 }
 
-void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       if (dev_priv->render_ring.ring_obj == NULL)
+       if (ring->gem_object == NULL)
                return;
 
-       drm_core_ioremapfree(&dev_priv->render_ring.map, dev);
-
-       i915_gem_object_unpin(dev_priv->render_ring.ring_obj);
-       drm_gem_object_unreference(dev_priv->render_ring.ring_obj);
-       dev_priv->render_ring.ring_obj = NULL;
-       memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring));
+       drm_core_ioremapfree(&ring->map, dev);
 
-       i915_gem_cleanup_hws(dev);
+       i915_gem_object_unpin(ring->gem_object);
+       drm_gem_object_unreference(ring->gem_object);
+       ring->gem_object = NULL;
+       cleanup_status_page(dev, ring);
 }
 
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       volatile unsigned int *virt;
+       unsigned int *virt;
        int rem;
+       rem = ring->size - ring->tail;
 
-       rem = dev_priv->render_ring.Size - dev_priv->render_ring.tail;
-       if (dev_priv->render_ring.space < rem) {
-               int ret = i915_wait_ring(dev, rem, __func__);
+       if (ring->space < rem) {
+               int ret = intel_wait_ring_buffer(dev, ring, rem);
                if (ret)
                        return ret;
        }
-       dev_priv->render_ring.space -= rem;
 
-       virt = (unsigned int *)
-               (dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail);
+       virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 4;
        while (rem--)
                *virt++ = MI_NOOP;
 
-       dev_priv->render_ring.tail = 0;
+       ring->tail = 0;
 
        return 0;
 }
 
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring);
-       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-       u32 last_acthd = I915_READ(acthd_reg);
-       u32 acthd;
-       u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       int i;
+       unsigned long end;
 
        trace_i915_ring_wait_begin (dev);
-
-       for (i = 0; i < 100000; i++) {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               acthd = I915_READ(acthd_reg);
+       end = jiffies + 3 * HZ;
+       do {
+               ring->head = ring->get_head(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
-                       ring->space += ring->Size;
+                       ring->space += ring->size;
                if (ring->space >= n) {
                        trace_i915_ring_wait_end (dev);
                        return 0;
@@ -550,19 +588,97 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }
+               yield();
+       } while (!time_after(jiffies, end));
+       trace_i915_ring_wait_end (dev);
+       return -EBUSY;
+}
 
+void intel_ring_begin(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
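+       /* Note: failures from intel_wrap_ring_buffer()/intel_wait_ring_buffer()
+        * are not propagated here; callers assume space has been made. */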
+       if (unlikely(ring->tail + n > ring->size))
+               intel_wrap_ring_buffer(dev, ring);
+       if (unlikely(ring->space < n))
+               intel_wait_ring_buffer(dev, ring, n);
+}
 
-               if (ring->head != last_head)
-                       i = 0;
-               if (acthd != last_acthd)
-                       i = 0;
+void intel_ring_emit(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data)
+{
+       unsigned int *virt = ring->virtual_start + ring->tail;
+       *virt = data;
+       ring->tail += 4;
+       ring->tail &= ring->size - 1;
+       ring->space -= 4;
+}
 
-               last_head = ring->head;
-               last_acthd = acthd;
-               msleep_interruptible(10);
+void intel_ring_advance(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       ring->advance_ring(dev, ring);
+}
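
Taken together, begin/emit/advance form the new per-ring emission path. A minimal sketch of the intended call pattern, assuming the existing MI_FLUSH and MI_NOOP command definitions; note that the size passed to intel_ring_begin() is in bytes in this version:

        intel_ring_begin(dev, ring, 2 * 4);     /* reserve two dwords */
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);          /* hand the new tail to the hardware */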
 
-       }
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len)
+{
+       unsigned int *virt;
+
+       BUG_ON((len & (4 - 1)) != 0);
+       intel_ring_begin(dev, ring, len);
+       /* Compute the destination only after begin(), which may wrap the tail. */
+       virt = ring->virtual_start + ring->tail;
+       memcpy(virt, data, len);
+       ring->tail += len;
+       ring->tail &= ring->size - 1;
+       ring->space -= len;
+       intel_ring_advance(dev, ring);
+}
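
intel_fill_struct() is the bulk counterpart of intel_ring_emit(): it copies a caller-built block of dwords in one shot, so the length must be a multiple of four bytes. A hedged usage sketch:

        u32 cmds[2] = { MI_NOOP, MI_NOOP };     /* illustrative payload */

        intel_fill_struct(dev, ring, cmds, sizeof(cmds));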
 
-       trace_i915_ring_wait_end (dev);
-       return -EBUSY;
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 seqno;
+       seqno = ring->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++ring->next_seqno == 0)
+               ring->next_seqno = 1;
+       return seqno;
 }
+
+struct intel_ring_buffer render_ring = {
+       .name                   = "render ring",
+       .regs                   = {
+               .ctl = PRB0_CTL,
+               .head = PRB0_HEAD,
+               .tail = PRB0_TAIL,
+               .start = PRB0_START
+       },
+       .ring_flag              = I915_EXEC_RENDER,
+       .size                   = 32 * PAGE_SIZE,
+       .alignment              = PAGE_SIZE,
+       .virtual_start          = NULL,
+       .dev                    = NULL,
+       .gem_object             = NULL,
+       .head                   = 0,
+       .tail                   = 0,
+       .space                  = 0,
+       .next_seqno             = 1,
+       .user_irq_refcount      = 0,
+       .irq_gem_seqno          = 0,
+       .waiting_gem_seqno      = 0,
+       .setup_status_page      = render_setup_status_page,
+       .init                   = init_render_ring,
+       .get_head               = render_ring_get_head,
+       .get_tail               = render_ring_get_tail,
+       .get_active_head        = render_ring_get_active_head,
+       .advance_ring           = render_ring_advance_ring,
+       .flush                  = render_ring_flush,
+       .add_request            = render_ring_add_request,
+       .get_gem_seqno          = render_ring_get_gem_seqno,
+       .user_irq_get           = render_ring_get_user_irq,
+       .user_irq_put           = render_ring_put_user_irq,
+       .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+       .status_page            = {NULL, 0, NULL},
+       .map                    = {0,}
+};
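
The point of the structure is that another ring becomes another initializer plus its own callbacks; the bsd_ring declared extern in the new header is expected to follow the same shape. A hypothetical sketch only; the register values and hooks below are placeholders, not part of this patch:

        struct intel_ring_buffer example_second_ring = {
                .name           = "example ring",
                .regs           = { .ctl = 0, .head = 0, .tail = 0, .start = 0 },
                .ring_flag      = I915_EXEC_BSD,
                .size           = 32 * PAGE_SIZE,
                .alignment      = PAGE_SIZE,
                .next_seqno     = 1,
                /* plus the per-ring hooks: setup_status_page, init, get_head,
                 * get_tail, get_active_head, advance_ring, flush, add_request,
                 * get_gem_seqno, user_irq_get, user_irq_put,
                 * dispatch_gem_execbuffer */
        };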
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644 (file)
index 0000000..d5568d3
--- /dev/null
@@ -0,0 +1,124 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct  intel_hw_status_page {
+       void            *page_addr;
+       unsigned int    gfx_addr;
+       struct          drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct  intel_ring_buffer {
+       const char      *name;
+       struct          ring_regs {
+                       u32 ctl;
+                       u32 head;
+                       u32 tail;
+                       u32 start;
+       } regs;
+       unsigned int    ring_flag;
+       unsigned long   size;
+       unsigned int    alignment;
+       void            *virtual_start;
+       struct          drm_device *dev;
+       struct          drm_gem_object *gem_object;
+
+       unsigned int    head;
+       unsigned int    tail;
+       unsigned int    space;
+       u32             next_seqno;
+       struct intel_hw_status_page status_page;
+
+       u32             irq_gem_seqno;          /* last seqno seen at irq time */
+       u32             waiting_gem_seqno;
+       int             user_irq_refcount;
+       void            (*user_irq_get)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*user_irq_put)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*setup_status_page)(struct drm_device *dev,
+                       struct  intel_ring_buffer *ring);
+
+       int             (*init)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+
+       unsigned int    (*get_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_tail)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_active_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*advance_ring)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*flush)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       u32     invalidate_domains,
+                       u32     flush_domains);
+       u32             (*add_request)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_file *file_priv,
+                       u32 flush_domains);
+       u32             (*get_gem_seqno)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       int             (*dispatch_gem_execbuffer)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_i915_gem_execbuffer2 *exec,
+                       struct drm_clip_rect *cliprects,
+                       uint64_t exec_offset);
+
+       /**
+        * List of objects currently involved in rendering from the
+        * ringbuffer.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * List of breadcrumbs associated with GPU requests currently
+        * outstanding.
+        */
+       struct list_head request_list;
+
+       wait_queue_head_t irq_queue;
+       drm_local_map_t map;
+};
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+               int reg)
+{
+       u32 *regs = ring->status_page.page_addr;
+       return regs[reg];
+}
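
intel_read_status_page() gives each ring access to its own hardware status page. For example, a ring's breadcrumb seqno can be read back as below, assuming the I915_GEM_HWS_INDEX slot that the existing GEM code uses:

        u32 last_seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);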
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+void intel_ring_emit(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
index b64a8d7cdf6d291205f8b89edf3a78d4a6a4d9c6..e9168704cabef4e4058c2d1b2535ec094da51ec1 100644 (file)
@@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
-       __u64 flags; /* currently unused */
+#define I915_EXEC_RENDER                 (1<<0)
+#define I915_EXEC_BSD                    (1<<1)
+       __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
 };
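
With flags no longer reserved, userspace can request a specific ring per submission. A userspace-side sketch, assuming libdrm's drmIoctl() and the existing DRM_IOCTL_I915_GEM_EXECBUFFER2 definition; exec_objects, count, batch_len and fd stand for caller state and are not part of the patch:

        struct drm_i915_gem_execbuffer2 execbuf;
        int ret;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr  = (uintptr_t)exec_objects; /* caller's exec object array */
        execbuf.buffer_count = count;
        execbuf.batch_len    = batch_len;
        execbuf.flags        = I915_EXEC_RENDER;        /* new in this patch */
        ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);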