Merge branch 'drm-next-3.15' of git://people.freedesktop.org/~deathsimple/linux into...
author	Dave Airlie <airlied@redhat.com>
Wed, 5 Mar 2014 04:52:19 +0000 (14:52 +1000)
committer	Dave Airlie <airlied@redhat.com>
Wed, 5 Mar 2014 04:52:19 +0000 (14:52 +1000)
This is the second pull request for the 3.15 radeon changes. Highlights this time:
- Better VRAM usage
- VM page table rework
- Enabling different UVD clocks again
- Some general cleanups and improvements

* 'drm-next-3.15' of git://people.freedesktop.org/~deathsimple/linux:
  drm/radeon: remove struct radeon_bo_list
  drm/radeon: drop non blocking allocations from sub allocator
  drm/radeon: remove global vm lock
  drm/radeon: use normal BOs for the page tables v4
  drm/radeon: further cleanup vm flushing & fencing
  drm/radeon: separate gart and vm functions
  drm/radeon: fix VCE suspend/resume
  drm/radeon: fix missing bo reservation
  drm/radeon: limit how much memory TTM can move per IB according to VRAM usage
  drm/radeon: validate relocations in the order determined by userspace v3
  drm/radeon: add buffers to the LRU list from smallest to largest
  drm/radeon: deduplicate code in radeon_gem_busy_ioctl
  drm/radeon: track memory statistics about VRAM and GTT usage and buffer moves v2
  drm/radeon: add a way to get and set initial buffer domains v2
  drm/radeon: use variable UVD clocks
  drm/radeon: cleanup the fence ring locking code
  drm/radeon: improve ring lockup detection code v2
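
Most of the churn in the diff below falls out of the first commit in that list: struct radeon_bo_list goes away and its per-BO state moves directly into the relocation entry, so every reloc->lobj.gpu_offset / reloc->lobj.tiling_flags access shortens to reloc->gpu_offset / reloc->tiling_flags. A minimal sketch of that structural change follows; the reloc type name (struct radeon_cs_reloc) and the elided members are assumptions here, only the accessors visible in the hunks are taken from the patch:

	/* Before (sketch): per-BO state was reached through a nested
	 * bo_list entry, hence reloc->lobj.gpu_offset in the old code. */
	struct radeon_cs_reloc {
		struct radeon_bo	*robj;	/* visible in the hunks */
		struct radeon_bo_list	lobj;	/* carried gpu_offset, tiling_flags, ... */
		/* ... */
	};

	/* After (sketch): the fields live on the reloc itself, so call
	 * sites become reloc->gpu_offset and reloc->tiling_flags. */
	struct radeon_cs_reloc {
		struct radeon_bo	*robj;
		uint64_t		gpu_offset;	/* 64-bit: used with upper_32_bits() below */
		u32			tiling_flags;
		/* ... */
	};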

25 files changed:
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c [new file with mode: 0644]
include/uapi/drm/radeon_drm.h

drivers/gpu/drm/radeon/Makefile
index ed60caa325188df6d206e4fb8a05886326498f98..09433534dc47099b5110208b9b5ee1398cc1991a 100644
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
        rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
        trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
-       ci_dpm.o dce6_afmt.o
+       ci_dpm.o dce6_afmt.o radeon_vm.o
 
 # add async DMA block
 radeon-y += \
drivers/gpu/drm/radeon/evergreen_cs.c
index c7cac07f139b2106041208179d124d8ff5ba20b0..5c8b358f9fbad903fb8615fd7806235fec0e5bf4 100644
@@ -1165,7 +1165,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case DB_DEPTH_CONTROL:
                track->db_depth_control = radeon_get_ib_value(p, idx);
@@ -1196,12 +1196,12 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        ib[idx] &= ~Z_ARRAY_MODE(0xf);
                        track->db_z_info &= ~Z_ARRAY_MODE(0xf);
-                       ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
-                       track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+                       track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO) {
                                unsigned bankw, bankh, mtaspect, tile_split;
 
-                               evergreen_tiling_fields(reloc->lobj.tiling_flags,
+                               evergreen_tiling_fields(reloc->tiling_flags,
                                                        &bankw, &bankh, &mtaspect,
                                                        &tile_split);
                                ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1237,7 +1237,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->db_z_read_offset = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->db_z_read_bo = reloc->robj;
                track->db_dirty = true;
                break;
@@ -1249,7 +1249,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->db_z_write_offset = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->db_z_write_bo = reloc->robj;
                track->db_dirty = true;
                break;
@@ -1261,7 +1261,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->db_s_read_offset = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->db_s_read_bo = reloc->robj;
                track->db_dirty = true;
                break;
@@ -1273,7 +1273,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->db_s_write_offset = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->db_s_write_bo = reloc->robj;
                track->db_dirty = true;
                break;
@@ -1297,7 +1297,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                }
                tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
                track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->vgt_strmout_bo[tmp] = reloc->robj;
                track->streamout_dirty = true;
                break;
@@ -1317,7 +1317,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
        case CB_TARGET_MASK:
                track->cb_target_mask = radeon_get_ib_value(p, idx);
                track->cb_dirty = true;
@@ -1381,8 +1381,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
-                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
                }
                track->cb_dirty = true;
                break;
@@ -1399,8 +1399,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
-                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
                }
                track->cb_dirty = true;
                break;
@@ -1461,10 +1461,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO) {
                                unsigned bankw, bankh, mtaspect, tile_split;
 
-                               evergreen_tiling_fields(reloc->lobj.tiling_flags,
+                               evergreen_tiling_fields(reloc->tiling_flags,
                                                        &bankw, &bankh, &mtaspect,
                                                        &tile_split);
                                ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1489,10 +1489,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO) {
                                unsigned bankw, bankh, mtaspect, tile_split;
 
-                               evergreen_tiling_fields(reloc->lobj.tiling_flags,
+                               evergreen_tiling_fields(reloc->tiling_flags,
                                                        &bankw, &bankh, &mtaspect,
                                                        &tile_split);
                                ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1520,7 +1520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->cb_color_fmask_bo[tmp] = reloc->robj;
                break;
        case CB_COLOR0_CMASK:
@@ -1537,7 +1537,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->cb_color_cmask_bo[tmp] = reloc->robj;
                break;
        case CB_COLOR0_FMASK_SLICE:
@@ -1578,7 +1578,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                }
                tmp = (reg - CB_COLOR0_BASE) / 0x3c;
                track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->cb_color_bo[tmp] = reloc->robj;
                track->cb_dirty = true;
                break;
@@ -1594,7 +1594,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                }
                tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
                track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->cb_color_bo[tmp] = reloc->robj;
                track->cb_dirty = true;
                break;
@@ -1606,7 +1606,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->htile_offset = radeon_get_ib_value(p, idx);
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->htile_bo = reloc->robj;
                track->db_dirty = true;
                break;
@@ -1723,7 +1723,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case SX_MEMORY_EXPORT_BASE:
                if (p->rdev->family >= CHIP_CAYMAN) {
@@ -1737,7 +1737,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case CAYMAN_SX_SCATTER_EXPORT_BASE:
                if (p->rdev->family < CHIP_CAYMAN) {
@@ -1751,7 +1751,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case SX_MISC:
                track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1836,7 +1836,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         (idx_value & 0xfffffff0) +
                         ((u64)(tmp & 0xff) << 32);
 
@@ -1882,7 +1882,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         idx_value +
                         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
@@ -1909,7 +1909,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         idx_value +
                         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
@@ -1937,7 +1937,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         radeon_get_ib_value(p, idx+1) +
                         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2027,7 +2027,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad DISPATCH_INDIRECT\n");
                        return -EINVAL;
                }
-               ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+               ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
                r = evergreen_cs_track_check(p);
                if (r) {
                        dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
@@ -2049,7 +2049,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                return -EINVAL;
                        }
 
-                       offset = reloc->lobj.gpu_offset +
+                       offset = reloc->gpu_offset +
                                 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                tmp = radeon_get_ib_value(p, idx) +
                                        ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
-                               offset = reloc->lobj.gpu_offset + tmp;
+                               offset = reloc->gpu_offset + tmp;
 
                                if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                                        dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -2144,7 +2144,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                tmp = radeon_get_ib_value(p, idx+2) +
                                        ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
 
-                               offset = reloc->lobj.gpu_offset + tmp;
+                               offset = reloc->gpu_offset + tmp;
 
                                if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                                        dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -2174,7 +2174,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                DRM_ERROR("bad SURFACE_SYNC\n");
                                return -EINVAL;
                        }
-                       ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                       ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                }
                break;
        case PACKET3_EVENT_WRITE:
@@ -2190,7 +2190,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                DRM_ERROR("bad EVENT_WRITE\n");
                                return -EINVAL;
                        }
-                       offset = reloc->lobj.gpu_offset +
+                       offset = reloc->gpu_offset +
                                 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
                                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2212,7 +2212,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2234,7 +2234,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2302,11 +2302,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                }
                                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                                        ib[idx+1+(i*8)+1] |=
-                                               TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
-                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                                               TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+                                       if (reloc->tiling_flags & RADEON_TILING_MACRO) {
                                                unsigned bankw, bankh, mtaspect, tile_split;
 
-                                               evergreen_tiling_fields(reloc->lobj.tiling_flags,
+                                               evergreen_tiling_fields(reloc->tiling_flags,
                                                                        &bankw, &bankh, &mtaspect,
                                                                        &tile_split);
                                                ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
@@ -2318,7 +2318,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                        }
                                }
                                texture = reloc->robj;
-                               toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                               toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 
                                /* tex mip base */
                                tex_dim = ib[idx+1+(i*8)+0] & 0x7;
@@ -2337,7 +2337,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                                DRM_ERROR("bad SET_RESOURCE (tex)\n");
                                                return -EINVAL;
                                        }
-                                       moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                                       moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                                        mipmap = reloc->robj;
                                }
 
@@ -2364,7 +2364,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                        ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
                                }
 
-                               offset64 = reloc->lobj.gpu_offset + offset;
+                               offset64 = reloc->gpu_offset + offset;
                                ib[idx+1+(i*8)+0] = offset64;
                                ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
                                                    (upper_32_bits(offset64) & 0xff);
@@ -2445,7 +2445,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+1] = offset;
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                }
@@ -2464,7 +2464,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+3] = offset;
                        ib[idx+4] = upper_32_bits(offset) & 0xff;
                }
@@ -2493,7 +2493,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                  offset + 8, radeon_bo_size(reloc->robj));
                        return -EINVAL;
                }
-               offset += reloc->lobj.gpu_offset;
+               offset += reloc->gpu_offset;
                ib[idx+0] = offset;
                ib[idx+1] = upper_32_bits(offset) & 0xff;
                break;
@@ -2518,7 +2518,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+1] = offset;
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                } else {
@@ -2542,7 +2542,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+3] = offset;
                        ib[idx+4] = upper_32_bits(offset) & 0xff;
                } else {
@@ -2717,7 +2717,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
                                p->idx += count + 7;
                                break;
                        /* linear */
@@ -2725,8 +2725,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
                                p->idx += count + 3;
                                break;
                        default:
@@ -2768,10 +2768,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+                               ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                p->idx += 5;
                                break;
                        /* Copy L2T/T2L */
@@ -2781,22 +2781,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        /* tiled src, linear dst */
                                        src_offset = radeon_get_ib_value(p, idx+1);
                                        src_offset <<= 8;
-                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
 
                                        dst_offset = radeon_get_ib_value(p, idx + 7);
                                        dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
                                        src_offset = radeon_get_ib_value(p, idx+7);
                                        src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
 
                                        dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset <<= 8;
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
                                }
                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                        dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
@@ -2827,10 +2827,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        dst_offset + count, radeon_bo_size(dst_reloc->robj));
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
-                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
-                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
+                               ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
+                               ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+                               ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                p->idx += 5;
                                break;
                        /* Copy L2L, partial */
@@ -2840,10 +2840,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        DRM_ERROR("L2L Partial is cayman only !\n");
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
-                               ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
-                               ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
+                               ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+                               ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
+                               ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
 
                                p->idx += 9;
                                break;
@@ -2876,12 +2876,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                               ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
-                               ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+                               ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
+                               ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                p->idx += 7;
                                break;
                        /* Copy L2T Frame to Field */
@@ -2916,10 +2916,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+                               ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+                               ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                p->idx += 10;
                                break;
                        /* Copy L2T/T2L, partial */
@@ -2932,16 +2932,16 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                /* detile bit */
                                if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
                                        /* tiled src, linear dst */
-                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
 
-                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
-                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
 
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
                                }
                                p->idx += 12;
                                break;
@@ -2978,10 +2978,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+                               ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+                               ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                p->idx += 10;
                                break;
                        /* Copy L2T/T2L (tile units) */
@@ -2992,22 +2992,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        /* tiled src, linear dst */
                                        src_offset = radeon_get_ib_value(p, idx+1);
                                        src_offset <<= 8;
-                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
 
                                        dst_offset = radeon_get_ib_value(p, idx+7);
                                        dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
                                        src_offset = radeon_get_ib_value(p, idx+7);
                                        src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
 
                                        dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset <<= 8;
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
                                }
                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                        dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
@@ -3028,8 +3028,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        DRM_ERROR("L2T, T2L Partial is cayman only !\n");
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+                               ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
                                p->idx += 13;
                                break;
                        /* Copy L2T broadcast (tile units) */
@@ -3065,10 +3065,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
                                        return -EINVAL;
                                }
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+                               ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+                               ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                p->idx += 10;
                                break;
                        default:
@@ -3089,8 +3089,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                         dst_offset, radeon_bo_size(dst_reloc->robj));
                                return -EINVAL;
                        }
-                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+                       ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                       ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
                        p->idx += 4;
                        break;
                case DMA_PACKET_NOP:
drivers/gpu/drm/radeon/r100.c
index 1690a2dc0721041a613bb0ee28045a49554ce2dc..0a894aee7406e1131aa27402183d75b7b2d8af6e 100644
@@ -1274,12 +1274,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 
        value = radeon_get_ib_value(p, idx);
        tmp = value & 0x003fffff;
-       tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+       tmp += (((u32)reloc->gpu_offset) >> 10);
 
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+               if (reloc->tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= RADEON_DST_TILE_MACRO;
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+               if (reloc->tiling_flags & RADEON_TILING_MICRO) {
                        if (reg == RADEON_SRC_PITCH_OFFSET) {
                                DRM_ERROR("Cannot src blit from microtiled surface\n");
                                radeon_cs_dump_packet(p, pkt);
@@ -1325,7 +1325,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                        return r;
                }
                idx_value = radeon_get_ib_value(p, idx);
-               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
 
                track->arrays[i + 0].esize = idx_value >> 8;
                track->arrays[i + 0].robj = reloc->robj;
@@ -1337,7 +1337,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
                track->arrays[i + 1].robj = reloc->robj;
                track->arrays[i + 1].esize = idx_value >> 24;
                track->arrays[i + 1].esize &= 0x7F;
@@ -1351,7 +1351,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                        return r;
                }
                idx_value = radeon_get_ib_value(p, idx);
-               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
                track->arrays[i + 0].robj = reloc->robj;
                track->arrays[i + 0].esize = idx_value >> 8;
                track->arrays[i + 0].esize &= 0x7F;
@@ -1594,7 +1594,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                track->zb_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1607,7 +1607,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                track->cb[0].robj = reloc->robj;
                track->cb[0].offset = idx_value;
                track->cb_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case RADEON_PP_TXOFFSET_0:
        case RADEON_PP_TXOFFSET_1:
@@ -1621,16 +1621,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= RADEON_TXO_MACRO_TILE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= RADEON_TXO_MICRO_TILE_X2;
 
                        tmp = idx_value & ~(0x7 << 2);
                        tmp |= tile_flags;
-                       ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+                       ib[idx] = tmp + ((u32)reloc->gpu_offset);
                } else
-                       ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+                       ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                track->textures[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -1648,7 +1648,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->textures[0].cube_info[i].offset = idx_value;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                track->textures[0].cube_info[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -1666,7 +1666,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->textures[1].cube_info[i].offset = idx_value;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                track->textures[1].cube_info[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -1684,7 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->textures[2].cube_info[i].offset = idx_value;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                track->textures[2].cube_info[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -1702,9 +1702,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= RADEON_COLOR_TILE_ENABLE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
                        tmp = idx_value & ~(0x7 << 16);
@@ -1772,7 +1772,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case RADEON_PP_CNTL:
                {
@@ -1932,7 +1932,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
@@ -1946,7 +1946,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
                track->num_arrays = 1;
                track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
 
drivers/gpu/drm/radeon/r200.c
index b3807edb19365e79e61bf392a66c22156803859e..58f0473aa73fba3cd1e62a786d6776c7a20ccd3b 100644
@@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                track->zb_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                track->cb[0].robj = reloc->robj;
                track->cb[0].offset = idx_value;
                track->cb_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case R200_PP_TXOFFSET_0:
        case R200_PP_TXOFFSET_1:
@@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R200_TXO_MACRO_TILE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R200_TXO_MICRO_TILE;
 
                        tmp = idx_value & ~(0x7 << 2);
                        tmp |= tile_flags;
-                       ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+                       ib[idx] = tmp + ((u32)reloc->gpu_offset);
                } else
-                       ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+                       ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                track->textures[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->textures[i].cube_info[face - 1].offset = idx_value;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                track->textures[i].cube_info[face - 1].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -287,9 +287,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                }
 
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= RADEON_COLOR_TILE_ENABLE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
                        tmp = idx_value & ~(0x7 << 16);
@@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case RADEON_PP_CNTL:
                {
drivers/gpu/drm/radeon/r300.c
index 7c63ef840e86abaf04f216201b32ee2f40323b9b..41cdf236ee9ab49b843c8007792aa38af504056e 100644
@@ -640,7 +640,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = idx_value;
                track->cb_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -653,7 +653,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                track->zb_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
@@ -682,16 +682,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 
                if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
                        ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
-                                 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+                                 ((idx_value & ~31) + (u32)reloc->gpu_offset);
                } else {
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R300_TXO_MACRO_TILE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R300_TXO_MICRO_TILE;
-                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                       else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
                                tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
 
-                       tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+                       tmp = idx_value + ((u32)reloc->gpu_offset);
                        tmp |= tile_flags;
                        ib[idx] = tmp;
                }
@@ -753,11 +753,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                                return r;
                        }
 
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R300_COLOR_TILE_ENABLE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R300_COLOR_MICROTILE_ENABLE;
-                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                       else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
                                tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
 
                        tmp = idx_value & ~(0x7 << 16);
@@ -838,11 +838,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                                return r;
                        }
 
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R300_DEPTHMACROTILE_ENABLE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                       if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R300_DEPTHMICROTILE_TILED;
-                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                       else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
                                tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
 
                        tmp = idx_value & ~(0x7 << 16);
@@ -1052,7 +1052,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case 0x4e0c:
                /* RB3D_COLOR_CHANNEL_MASK */
@@ -1097,7 +1097,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                track->aa.robj = reloc->robj;
                track->aa.offset = idx_value;
                track->aa_dirty = true;
-               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                break;
        case R300_RB3D_AARESOLVE_PITCH:
                track->aa.pitch = idx_value & 0x3FFE;
@@ -1162,7 +1162,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
                        radeon_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
index 2812c7d1ae6f21b5d4ba6550755b5ecc3fc8ef61..12511bb5fd6facea3f6b5eca1a5eaf43093274fe 100644 (file)
@@ -1022,7 +1022,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case SQ_CONFIG:
                track->sq_config = radeon_get_ib_value(p, idx);
@@ -1043,7 +1043,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        track->db_depth_info = radeon_get_ib_value(p, idx);
                        ib[idx] &= C_028010_ARRAY_MODE;
                        track->db_depth_info &= C_028010_ARRAY_MODE;
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO) {
                                ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
                                track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
                        } else {
@@ -1084,9 +1084,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                }
                tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
                track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->vgt_strmout_bo[tmp] = reloc->robj;
-               track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
+               track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
                track->streamout_dirty = true;
                break;
        case VGT_STRMOUT_BUFFER_SIZE_0:
@@ -1105,7 +1105,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case R_028238_CB_TARGET_MASK:
                track->cb_target_mask = radeon_get_ib_value(p, idx);
@@ -1142,10 +1142,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
                        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       if (reloc->tiling_flags & RADEON_TILING_MACRO) {
                                ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
                                track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+                       } else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
                                ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
                                track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
                        }
@@ -1214,7 +1214,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        track->cb_color_frag_bo[tmp] = reloc->robj;
                        track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
-                       ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                       ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                }
                if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
                        track->cb_dirty = true;
@@ -1245,7 +1245,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        track->cb_color_tile_bo[tmp] = reloc->robj;
                        track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
-                       ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                       ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                }
                if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
                        track->cb_dirty = true;
@@ -1281,10 +1281,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                }
                tmp = (reg - CB_COLOR0_BASE) / 4;
                track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->cb_color_base_last[tmp] = ib[idx];
                track->cb_color_bo[tmp] = reloc->robj;
-               track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
+               track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
                track->cb_dirty = true;
                break;
        case DB_DEPTH_BASE:
@@ -1295,9 +1295,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->db_offset = radeon_get_ib_value(p, idx) << 8;
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->db_bo = reloc->robj;
-               track->db_bo_mc = reloc->lobj.gpu_offset;
+               track->db_bo_mc = reloc->gpu_offset;
                track->db_dirty = true;
                break;
        case DB_HTILE_DATA_BASE:
@@ -1308,7 +1308,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                track->htile_offset = radeon_get_ib_value(p, idx) << 8;
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                track->htile_bo = reloc->robj;
                track->db_dirty = true;
                break;
@@ -1377,7 +1377,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case SX_MEMORY_EXPORT_BASE:
                r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
@@ -1386,7 +1386,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                        "0x%04X\n", reg);
                        return -EINVAL;
                }
-               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                break;
        case SX_MISC:
                track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1672,7 +1672,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         (idx_value & 0xfffffff0) +
                         ((u64)(tmp & 0xff) << 32);
 
@@ -1713,7 +1713,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         idx_value +
                         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
@@ -1765,7 +1765,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                return -EINVAL;
                        }
 
-                       offset = reloc->lobj.gpu_offset +
+                       offset = reloc->gpu_offset +
                                 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
                                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -1805,7 +1805,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        tmp = radeon_get_ib_value(p, idx) +
                                ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
-                       offset = reloc->lobj.gpu_offset + tmp;
+                       offset = reloc->gpu_offset + tmp;
 
                        if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                                dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -1835,7 +1835,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        tmp = radeon_get_ib_value(p, idx+2) +
                                ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
 
-                       offset = reloc->lobj.gpu_offset + tmp;
+                       offset = reloc->gpu_offset + tmp;
 
                        if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                                dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -1861,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                DRM_ERROR("bad SURFACE_SYNC\n");
                                return -EINVAL;
                        }
-                       ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                       ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                }
                break;
        case PACKET3_EVENT_WRITE:
@@ -1877,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                DRM_ERROR("bad EVENT_WRITE\n");
                                return -EINVAL;
                        }
-                       offset = reloc->lobj.gpu_offset +
+                       offset = reloc->gpu_offset +
                                 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
                                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -1899,7 +1899,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
 
-               offset = reloc->lobj.gpu_offset +
+               offset = reloc->gpu_offset +
                         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -1964,11 +1964,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        DRM_ERROR("bad SET_RESOURCE\n");
                                        return -EINVAL;
                                }
-                               base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                               base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                                       if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                                ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
-                                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                                       else if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                                ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
                                }
                                texture = reloc->robj;
@@ -1978,13 +1978,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        DRM_ERROR("bad SET_RESOURCE\n");
                                        return -EINVAL;
                                }
-                               mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                               mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                                mipmap = reloc->robj;
                                r = r600_check_texture_resource(p,  idx+(i*7)+1,
                                                                texture, mipmap,
                                                                base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
                                                                mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
-                                                               reloc->lobj.tiling_flags);
+                                                               reloc->tiling_flags);
                                if (r)
                                        return r;
                                ib[idx+1+(i*7)+2] += base_offset;
@@ -2008,7 +2008,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
                                }
 
-                               offset64 = reloc->lobj.gpu_offset + offset;
+                               offset64 = reloc->gpu_offset + offset;
                                ib[idx+1+(i*8)+0] = offset64;
                                ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
                                                    (upper_32_bits(offset64) & 0xff);
@@ -2118,7 +2118,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+                       ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
                }
                break;
        case PACKET3_SURFACE_BASE_UPDATE:
@@ -2151,7 +2151,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+1] = offset;
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                }
@@ -2170,7 +2170,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+3] = offset;
                        ib[idx+4] = upper_32_bits(offset) & 0xff;
                }
@@ -2199,7 +2199,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                  offset + 8, radeon_bo_size(reloc->robj));
                        return -EINVAL;
                }
-               offset += reloc->lobj.gpu_offset;
+               offset += reloc->gpu_offset;
                ib[idx+0] = offset;
                ib[idx+1] = upper_32_bits(offset) & 0xff;
                break;
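
Several of these fixups split a 40-bit GPU address across two dwords, with the low 32 bits in one slot and bits 39..32 in the next. A worked example, with the offset value assumed for illustration:

	/* Assume offset == 0x123456000 after adding reloc->gpu_offset.  Then:
	 *   ib[idx+0] = (u32)offset;                  -> 0x23456000 (low 32 bits)
	 *   ib[idx+1] = upper_32_bits(offset) & 0xff; -> 0x01 (bits 39..32)
	 */
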
@@ -2224,7 +2224,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+1] = offset;
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                } else {
@@ -2248,7 +2248,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                          offset + 4, radeon_bo_size(reloc->robj));
                                return -EINVAL;
                        }
-                       offset += reloc->lobj.gpu_offset;
+                       offset += reloc->gpu_offset;
                        ib[idx+3] = offset;
                        ib[idx+4] = upper_32_bits(offset) & 0xff;
                } else {
@@ -2505,14 +2505,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
                                p->idx += count + 5;
                        } else {
                                dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
-                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                               ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                               ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
                                p->idx += count + 3;
                        }
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
@@ -2539,22 +2539,22 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                        /* tiled src, linear dst */
                                        src_offset = radeon_get_ib_value(p, idx+1);
                                        src_offset <<= 8;
-                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
 
                                        dst_offset = radeon_get_ib_value(p, idx+5);
                                        dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
-                                       ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
                                        src_offset = radeon_get_ib_value(p, idx+5);
                                        src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
-                                       ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
 
                                        dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset <<= 8;
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
                                }
                                p->idx += 7;
                        } else {
@@ -2564,10 +2564,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                        dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                       ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+                                       ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
                                        p->idx += 5;
                                } else {
                                        src_offset = radeon_get_ib_value(p, idx+2);
@@ -2575,10 +2575,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                        dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+                                       ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+                                       ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+                                       ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
                                        p->idx += 4;
                                }
                        }
@@ -2610,8 +2610,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                         dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
                                return -EINVAL;
                        }
-                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+                       ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+                       ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
                        p->idx += 4;
                        break;
                case DMA_PACKET_NOP:
index 4581df193932f1b1ad3c9b66178bdf2a1ee547ec..111deab2492a082e7be66a129c7614aaa8b10fbb 100644 (file)
@@ -363,9 +363,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_locked(struct radeon_fence *fence);
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr);
@@ -457,6 +456,7 @@ struct radeon_bo {
        /* Protected by gem.mutex */
        struct list_head                list;
        /* Protected by tbo.reserved */
+       u32                             initial_domain;
        u32                             placements[3];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
@@ -479,16 +479,6 @@ struct radeon_bo {
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
-struct radeon_bo_list {
-       struct ttm_validate_buffer tv;
-       struct radeon_bo        *bo;
-       uint64_t                gpu_offset;
-       bool                    written;
-       unsigned                domain;
-       unsigned                alt_domain;
-       u32                     tiling_flags;
-};
-
 int radeon_gem_debugfs_init(struct radeon_device *rdev);
 
 /* sub-allocation manager, it has to be protected by another lock.
@@ -805,8 +795,8 @@ struct radeon_ring {
        unsigned                ring_size;
        unsigned                ring_free_dw;
        int                     count_dw;
-       unsigned long           last_activity;
-       unsigned                last_rptr;
+       atomic_t                last_rptr;
+       atomic64_t              last_activity;
        uint64_t                gpu_addr;
        uint32_t                align_mask;
        uint32_t                ptr_mask;
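
With last_rptr and last_activity converted to atomics, a lockup check no longer needs to hold the ring mutex. A minimal sketch of how such a check can use these fields; the function names and the millisecond timeout parameter are assumptions, not the driver's exact code:

	static void lockup_update(struct radeon_ring *ring, u32 rptr)
	{
		atomic_set(&ring->last_rptr, rptr);
		atomic64_set(&ring->last_activity, jiffies_64);
	}

	static bool lockup_check(struct radeon_ring *ring, u32 rptr, u64 timeout_ms)
	{
		u64 elapsed;

		if (rptr != atomic_read(&ring->last_rptr)) {
			/* the ring advanced since the last check, no lockup */
			lockup_update(ring, rptr);
			return false;
		}

		elapsed = jiffies_to_msecs(jiffies_64 -
					   atomic64_read(&ring->last_activity));
		return elapsed >= timeout_ms;
	}
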
@@ -858,17 +848,22 @@ struct radeon_mec {
 #define R600_PTE_READABLE      (1 << 5)
 #define R600_PTE_WRITEABLE     (1 << 6)
 
+struct radeon_vm_pt {
+       struct radeon_bo                *bo;
+       uint64_t                        addr;
+};
+
 struct radeon_vm {
-       struct list_head                list;
        struct list_head                va;
        unsigned                        id;
 
        /* contains the page directory */
-       struct radeon_sa_bo             *page_directory;
+       struct radeon_bo                *page_directory;
        uint64_t                        pd_gpu_addr;
+       unsigned                        max_pde_used;
 
        /* array of page tables, one for each page directory entry */
-       struct radeon_sa_bo             **page_tables;
+       struct radeon_vm_pt             *page_tables;
 
        struct mutex                    mutex;
        /* last fence for cs using this vm */
@@ -880,10 +875,7 @@ struct radeon_vm {
 };
 
 struct radeon_vm_manager {
-       struct mutex                    lock;
-       struct list_head                lru_vm;
        struct radeon_fence             *active[RADEON_NUM_VM];
-       struct radeon_sa_manager        sa_manager;
        uint32_t                        max_pfn;
        /* number of VMIDs */
        unsigned                        nvm;
@@ -986,9 +978,12 @@ void cayman_dma_fini(struct radeon_device *rdev);
 struct radeon_cs_reloc {
        struct drm_gem_object           *gobj;
        struct radeon_bo                *robj;
-       struct radeon_bo_list           lobj;
+       struct ttm_validate_buffer      tv;
+       uint64_t                        gpu_offset;
+       unsigned                        domain;
+       unsigned                        alt_domain;
+       uint32_t                        tiling_flags;
        uint32_t                        handle;
-       uint32_t                        flags;
 };
 
 struct radeon_cs_chunk {
@@ -1012,6 +1007,7 @@ struct radeon_cs_parser {
        unsigned                nrelocs;
        struct radeon_cs_reloc  *relocs;
        struct radeon_cs_reloc  **relocs_ptr;
+       struct radeon_cs_reloc  *vm_bos;
        struct list_head        validated;
        unsigned                dma_reloc_idx;
        /* indices of various chunks */
@@ -1635,7 +1631,6 @@ int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
 
 struct radeon_vce {
        struct radeon_bo        *vcpu_bo;
-       void                    *cpu_addr;
        uint64_t                gpu_addr;
        unsigned                fw_version;
        unsigned                fb_version;
@@ -2117,6 +2112,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp);
 int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp);
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
@@ -2307,6 +2304,10 @@ struct radeon_device {
        /* virtual memory */
        struct radeon_vm_manager        vm_manager;
        struct mutex                    gpu_clock_mutex;
+       /* memory stats */
+       atomic64_t                      vram_usage;
+       atomic64_t                      gtt_usage;
+       atomic64_t                      num_bytes_moved;
        /* ACPI interface */
        struct radeon_atif              atif;
        struct radeon_atcs              atcs;
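
The new counters are plain atomic64s, so buffer-move notifications can account migrations without taking a driver lock. A hypothetical sketch of such accounting; the helper name and parameters are illustrative only:

	static void account_move(struct radeon_device *rdev, u64 size,
				 bool from_vram, bool to_vram)
	{
		atomic64_add(size, &rdev->num_bytes_moved);
		atomic64_sub(size, from_vram ? &rdev->vram_usage : &rdev->gtt_usage);
		atomic64_add(size, to_vram ? &rdev->vram_usage : &rdev->gtt_usage);
	}
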
@@ -2794,16 +2795,22 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev,
  */
 int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+                                         struct radeon_vm *vm,
+                                          struct list_head *head);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+                     struct radeon_vm *vm,
+                     int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence);
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+                                   struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
                        struct radeon_vm *vm,
                        struct radeon_bo *bo,
index f28a8d82fa19e1d49e21ff43837e01c21b578065..2b6e0ebcc13ab5e75b76089dcbcc00eeae08b360 100644 (file)
  * Authors:
  *    Jerome Glisse <glisse@freedesktop.org>
  */
+#include <linux/list_sort.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_trace.h"
 
+#define RADEON_CS_MAX_PRIORITY         32u
+#define RADEON_CS_NUM_BUCKETS          (RADEON_CS_MAX_PRIORITY + 1)
+
+/* This is based on the bucket sort with O(n) time complexity.
+ * An item with priority "i" is added to bucket[i]. The lists are then
+ * concatenated in descending order.
+ */
+struct radeon_cs_buckets {
+       struct list_head bucket[RADEON_CS_NUM_BUCKETS];
+};
+
+static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
+{
+       unsigned i;
+
+       for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
+               INIT_LIST_HEAD(&b->bucket[i]);
+}
+
+static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
+                                 struct list_head *item, unsigned priority)
+{
+       /* Since buffers which appear sooner in the relocation list are
+        * likely to be used more often than buffers which appear later
+        * in the list, the sort mustn't change the ordering of buffers
+        * with the same priority, i.e. it must be stable.
+        */
+       list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
+}
+
+static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
+                                      struct list_head *out_list)
+{
+       unsigned i;
+
+       /* Connect the sorted buckets in the output list. */
+       for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
+               list_splice(&b->bucket[i], out_list);
+       }
+}
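
Because each bucket is spliced onto the head of the output list in ascending index order, higher-priority buckets end up in front while the order within each bucket is preserved. A usage sketch with assumed items, each embedding a struct list_head named head:

	struct radeon_cs_buckets b;
	LIST_HEAD(sorted);

	radeon_cs_buckets_init(&b);
	radeon_cs_buckets_add(&b, &item_a.head, 3);	/* hypothetical items */
	radeon_cs_buckets_add(&b, &item_b.head, 1);
	radeon_cs_buckets_add(&b, &item_c.head, 3);
	radeon_cs_buckets_get_list(&b, &sorted);
	/* sorted: item_a, item_c, item_b -- descending priority, stable per bucket */
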
+
 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 {
        struct drm_device *ddev = p->rdev->ddev;
        struct radeon_cs_chunk *chunk;
+       struct radeon_cs_buckets buckets;
        unsigned i, j;
        bool duplicate;
 
@@ -52,8 +95,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
+
+       radeon_cs_buckets_init(&buckets);
+
        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
+               unsigned priority;
 
                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
@@ -78,8 +125,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                }
                p->relocs_ptr[i] = &p->relocs[i];
                p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
-               p->relocs[i].lobj.bo = p->relocs[i].robj;
-               p->relocs[i].lobj.written = !!r->write_domain;
+
+               /* The userspace buffer priorities are from 0 to 15. A higher
+                * number means the buffer is more important.
+                * Also, the buffers used for write have a higher priority than
+                * the buffers used for read only, which doubles the range
+                * to 0 to 31. 32 is reserved for the kernel driver.
+                */
+               priority = (r->flags & 0xf) * 2 + !!r->write_domain;
 
                /* the first reloc of a UVD job is the msg and that must be in
                   VRAM, also put everything into VRAM on AGP cards to avoid
@@ -87,29 +140,38 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
                        /* TODO: is this still needed for NI+ ? */
-                       p->relocs[i].lobj.domain =
+                       p->relocs[i].domain =
                                RADEON_GEM_DOMAIN_VRAM;
 
-                       p->relocs[i].lobj.alt_domain =
+                       p->relocs[i].alt_domain =
                                RADEON_GEM_DOMAIN_VRAM;
 
+                       /* prioritize this over any other relocation */
+                       priority = RADEON_CS_MAX_PRIORITY;
                } else {
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;
 
-                       p->relocs[i].lobj.domain = domain;
+                       p->relocs[i].domain = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
-                       p->relocs[i].lobj.alt_domain = domain;
+                       p->relocs[i].alt_domain = domain;
                }
 
-               p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+               p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].handle = r->handle;
 
-               radeon_bo_list_add_object(&p->relocs[i].lobj,
-                                         &p->validated);
+               radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
+                                     priority);
        }
-       return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
+
+       radeon_cs_buckets_get_list(&buckets, &p->validated);
+
+       if (p->cs_flags & RADEON_CS_USE_VM)
+               p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
+                                             &p->validated);
+
+       return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
 }
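
Worked through with assumed values, the priority encoding behaves as follows; cs_priority is a hypothetical helper restating the expression above:

	static unsigned cs_priority(u32 flags, u32 write_domain)
	{
		return (flags & 0xf) * 2 + !!write_domain;
	}

	/* cs_priority(5, RADEON_GEM_DOMAIN_VRAM) == 11   written, priority 5
	 * cs_priority(15, 0)                     == 30   read-only, priority 15
	 * UVD msg buffers are forced to RADEON_CS_MAX_PRIORITY (32) and thus
	 * always validate ahead of any userspace buffer.
	 */
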
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -290,6 +352,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
        return 0;
 }
 
+static int cmp_size_smaller_first(void *priv, struct list_head *a,
+                                 struct list_head *b)
+{
+       struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
+       struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+
+       /* Sort A before B if A is smaller. */
+       return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+}
+
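
The comparator follows the usual negative/zero/positive list_sort() contract. For example:

	/* A 4-page BO against a 64-page BO yields 4 - 64 = -60 < 0, so the
	 * smaller BO sorts first and reaches the LRU list ahead of the big one.
	 */
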
 /**
  * cs_parser_fini() - clean parser states
  * @parser:    parser structure holding parsing context.
@@ -303,6 +375,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
        unsigned i;
 
        if (!error) {
+               /* Sort the buffer list from the smallest to largest buffer,
+                * which affects the order of buffers in the LRU list.
+                * This ensures that the smallest buffers are added to the
+                * LRU list first, so they are likely to be evicted first
+                * later on, instead of large buffers whose eviction is
+                * more expensive.
+                *
+                * This slightly lowers the number of bytes moved by TTM
+                * per frame under memory pressure.
+                */
+               list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+
                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
                                            parser->ib.fence);
@@ -320,6 +404,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
        kfree(parser->track);
        kfree(parser->relocs);
        kfree(parser->relocs_ptr);
+       kfree(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
@@ -359,24 +444,32 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
        return r;
 }
 
-static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
 {
-       struct radeon_device *rdev = parser->rdev;
-       struct radeon_bo_list *lobj;
-       struct radeon_bo *bo;
-       int r;
+       struct radeon_device *rdev = p->rdev;
+       int i, r;
 
-       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
-       if (r) {
+       r = radeon_vm_update_page_directory(rdev, vm);
+       if (r)
                return r;
-       }
-       list_for_each_entry(lobj, &parser->validated, tv.head) {
-               bo = lobj->bo;
-               r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
-               if (r) {
+
+       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+                               &rdev->ring_tmp_bo.bo->tbo.mem);
+       if (r)
+               return r;
+
+       for (i = 0; i < p->nrelocs; i++) {
+               struct radeon_bo *bo;
+
+               /* ignore duplicates */
+               if (p->relocs_ptr[i] != &p->relocs[i])
+                       continue;
+
+               bo = p->relocs[i].robj;
+               r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+               if (r)
                        return r;
-               }
        }
        return 0;
 }
@@ -408,20 +501,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);
 
-       mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&vm->mutex);
-       r = radeon_vm_alloc_pt(rdev, vm);
-       if (r) {
-               goto out;
-       }
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r) {
                goto out;
        }
        radeon_cs_sync_rings(parser);
        radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
-       radeon_semaphore_sync_to(parser->ib.semaphore,
-                                radeon_vm_grab_id(rdev, vm, parser->ring));
 
        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
@@ -430,14 +516,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                r = radeon_ib_schedule(rdev, &parser->ib, NULL);
        }
 
-       if (!r) {
-               radeon_vm_fence(rdev, vm, parser->ib.fence);
-       }
-
 out:
-       radeon_vm_add_to_lru(rdev, vm);
        mutex_unlock(&vm->mutex);
-       mutex_unlock(&rdev->vm_manager.lock);
        return r;
 }
 
@@ -705,9 +785,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
        /* FIXME: we assume reloc size is 4 dwords */
        if (nomm) {
                *cs_reloc = p->relocs;
-               (*cs_reloc)->lobj.gpu_offset =
+               (*cs_reloc)->gpu_offset =
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
-               (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+               (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
                *cs_reloc = p->relocs_ptr[(idx / 4)];
        return 0;
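
In the nomm path the 64-bit GPU offset is assembled from two dwords of the reloc chunk. With chunk contents assumed for illustration:

	/* relocs_chunk->kdata[idx + 0] = 0x00001000;   low 32 bits
	 * relocs_chunk->kdata[idx + 3] = 0x000000ff;   high 32 bits
	 * gives gpu_offset = ((u64)0xff << 32) | 0x1000 = 0xff00001000.
	 */
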
index b012cbbc3ed5a9b892b433eff0ee5f3134a130de..7db44de90d6c17905767d953c5c4026e6f527b1a 100644 (file)
@@ -1191,14 +1191,12 @@ int radeon_device_init(struct radeon_device *rdev,
        r = radeon_gem_init(rdev);
        if (r)
                return r;
-       /* initialize vm here */
-       mutex_init(&rdev->vm_manager.lock);
+
        /* Adjust VM size here.
         * Currently set to 4GB ((1 << 20) 4k pages).
         * Max GPUVM size for cayman and SI is 40 bits.
         */
        rdev->vm_manager.max_pfn = 1 << 20;
-       INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
@@ -1445,10 +1443,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        /* evict vram memory */
        radeon_bo_evict_vram(rdev);
 
-       mutex_lock(&rdev->ring_lock);
        /* wait for gpu to finish processing current batch */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
-               r = radeon_fence_wait_empty_locked(rdev, i);
+               r = radeon_fence_wait_empty(rdev, i);
                if (r) {
                        /* delay GPU reset to resume */
                        force_completion = true;
@@ -1457,7 +1454,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        if (force_completion) {
                radeon_fence_driver_force_completion(rdev);
        }
-       mutex_unlock(&rdev->ring_lock);
 
        radeon_save_bios_scratch_regs(rdev);
 
index 84a1bbb75f914a7bd914ad9120b8a2e2ac9aac5a..4392b7c95ee6d88ead4e4b6e27c27d82dcf58671 100644 (file)
  *   2.35.0 - Add CIK macrotile mode array query
  *   2.36.0 - Fix CIK DCE tiling setup
  *   2.37.0 - allow GS ring setup on r6xx/r7xx
+ *   2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN)
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       37
+#define KMS_DRIVER_MINOR       38
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
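
A userspace sketch of the ioctl behind the 2.38 bump, querying a buffer's initial domain; this assumes the RADEON_GEM_OP uapi added by this series and libdrm's drmCommandWriteRead, with error handling omitted:

	struct drm_radeon_gem_op op = {
		.handle = bo_handle,	/* assumed GEM handle */
		.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN,
	};

	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &op, sizeof(op)) == 0)
		printf("initial domain: 0x%llx\n", (unsigned long long)op.value);
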
index c37cb79a9489aadd38a84b2f59a6aed7e56333a5..a77b1c13ea43d6bea244e0da8666c82bb082ea60 100644 (file)
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * @rdev: radeon device pointer
  * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptible sleep
- * @lock_ring: whether the ring should be locked or not
  *
  * Wait for the requested sequence number(s) to be written by any ring
  * (all asics).  Sequence number array is indexed by ring id.
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-                                bool intr, bool lock_ring)
+                                bool intr)
 {
        uint64_t last_seq[RADEON_NUM_RINGS];
        bool signaled;
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                        if (i != RADEON_NUM_RINGS)
                                continue;
 
-                       if (lock_ring)
-                               mutex_lock(&rdev->ring_lock);
-
                        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                if (!target_seq[i])
                                        continue;
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 
                                /* remember that we need a reset */
                                rdev->needs_reset = true;
-                               if (lock_ring)
-                                       mutex_unlock(&rdev->ring_lock);
                                wake_up_all(&rdev->fence_queue);
                                return -EDEADLK;
                        }
-
-                       if (lock_ring)
-                               mutex_unlock(&rdev->ring_lock);
                }
        }
        return 0;
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
        if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
                return 0;
 
-       r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+       r = radeon_fence_wait_seq(fence->rdev, seq, intr);
        if (r)
                return r;
 
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
        if (num_rings == 0)
                return -ENOENT;
 
-       r = radeon_fence_wait_seq(rdev, seq, intr, true);
+       r = radeon_fence_wait_seq(rdev, seq, intr);
        if (r) {
                return r;
        }
@@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 }
 
 /**
- * radeon_fence_wait_locked - wait for a fence to signal
- *
- * @fence: radeon fence object
- *
- * Wait for the requested fence to signal (all asics).
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait_locked(struct radeon_fence *fence)
-{
-       uint64_t seq[RADEON_NUM_RINGS] = {};
-       int r;
-
-       if (fence == NULL) {
-               WARN(1, "Querying an invalid fence : %p !\n", fence);
-               return -EINVAL;
-       }
-
-       seq[fence->ring] = fence->seq;
-       if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
-               return 0;
-
-       r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
-       if (r)
-               return r;
-
-       fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-       return 0;
-}
-
-/**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence)
  * Returns 0 if the next fence has passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
        uint64_t seq[RADEON_NUM_RINGS] = {};
 
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
                   already the last emitted fence */
                return -ENOENT;
        }
-       return radeon_fence_wait_seq(rdev, seq, false, false);
+       return radeon_fence_wait_seq(rdev, seq, false);
 }
 
 /**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
        uint64_t seq[RADEON_NUM_RINGS] = {};
        int r;
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
        if (!seq[ring])
                return 0;
 
-       r = radeon_fence_wait_seq(rdev, seq, false, false);
+       r = radeon_fence_wait_seq(rdev, seq, false);
        if (r) {
                if (r == -EDEADLK)
                        return -EDEADLK;
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
-               r = radeon_fence_wait_empty_locked(rdev, ring);
+               r = radeon_fence_wait_empty(rdev, ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        radeon_fence_driver_force_completion(rdev);
index a8f9b463bf2a4767d9ed35d150bafffbfa1dfe07..2e723651069bd7d084d393e25c741f122b4c5c79 100644 (file)
@@ -28,8 +28,6 @@
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
-#include "radeon_reg.h"
-#include "radeon_trace.h"
 
 /*
  * GART
@@ -394,959 +392,3 @@ void radeon_gart_fini(struct radeon_device *rdev)
 
        radeon_dummy_page_fini(rdev);
 }
-
-/*
- * GPUVM
- * GPUVM is similar to the legacy gart on older asics, however
- * rather than there being a single global gart table
- * for the entire GPU, there are multiple VM page tables active
- * at any given time.  The VM page tables can contain a mix of
- * vram pages and system memory pages, and the system memory pages
- * can be mapped as snooped (cached system pages) or unsnooped
- * (uncached system pages).
- * Each VM has an ID associated with it and there is a page table
- * associated with each VMID.  When executing a command buffer,
- * the kernel tells the ring what VMID to use for that command
- * buffer.  VMIDs are allocated dynamically as commands are submitted.
- * The userspace drivers maintain their own address space and the kernel
- * sets up their page tables accordingly when they submit their
- * command buffers and a VMID is assigned.
- * Cayman/Trinity support up to 8 active VMs at any given time;
- * SI supports 16.
- */
-
-/*
- * vm helpers
- *
- * TODO bind a default page at vm initialization for default address
- */
-
-/**
- * radeon_vm_num_pdes - return the number of page directory entries
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the number of page directory entries (cayman+).
- */
-static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
-{
-       return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
-}
-
-/**
- * radeon_vm_directory_size - returns the size of the page directory in bytes
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the size of the page directory in bytes (cayman+).
- */
-static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
-{
-       return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
-}
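
For scale, with max_pfn = 1 << 20 as set in radeon_device.c above and assuming RADEON_VM_BLOCK_SIZE is 9:

	/* num_pdes  = (1 << 20) >> 9 = 2048 page directory entries
	 * directory = 2048 * 8 bytes = 16 KB, page aligned
	 */
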
-
-/**
- * radeon_vm_manager_init - init the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Init the vm manager (cayman+).
- * Returns 0 for success, error for failure.
- */
-int radeon_vm_manager_init(struct radeon_device *rdev)
-{
-       struct radeon_vm *vm;
-       struct radeon_bo_va *bo_va;
-       int r;
-       unsigned size;
-
-       if (!rdev->vm_manager.enabled) {
-               /* allocate enough for 2 full VM pts */
-               size = radeon_vm_directory_size(rdev);
-               size += rdev->vm_manager.max_pfn * 8;
-               size *= 2;
-               r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                             RADEON_GPU_PAGE_ALIGN(size),
-                                             RADEON_VM_PTB_ALIGN_SIZE,
-                                             RADEON_GEM_DOMAIN_VRAM);
-               if (r) {
-                       dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
-                               (rdev->vm_manager.max_pfn * 8) >> 10);
-                       return r;
-               }
-
-               r = radeon_asic_vm_init(rdev);
-               if (r)
-                       return r;
-
-               rdev->vm_manager.enabled = true;
-
-               r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-               if (r)
-                       return r;
-       }
-
-       /* restore page table */
-       list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-               if (vm->page_directory == NULL)
-                       continue;
-
-               list_for_each_entry(bo_va, &vm->va, vm_list) {
-                       bo_va->valid = false;
-               }
-       }
-       return 0;
-}
-
-/**
- * radeon_vm_free_pt - free the page table for a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Free the page table of a specific vm (cayman+).
- *
- * Global and local mutex must be lock!
- */
-static void radeon_vm_free_pt(struct radeon_device *rdev,
-                                   struct radeon_vm *vm)
-{
-       struct radeon_bo_va *bo_va;
-       int i;
-
-       if (!vm->page_directory)
-               return;
-
-       list_del_init(&vm->list);
-       radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-
-       list_for_each_entry(bo_va, &vm->va, vm_list) {
-               bo_va->valid = false;
-       }
-
-       if (vm->page_tables == NULL)
-               return;
-
-       for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
-               radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
-
-       kfree(vm->page_tables);
-}
-
-/**
- * radeon_vm_manager_fini - tear down the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the VM manager (cayman+).
- */
-void radeon_vm_manager_fini(struct radeon_device *rdev)
-{
-       struct radeon_vm *vm, *tmp;
-       int i;
-
-       if (!rdev->vm_manager.enabled)
-               return;
-
-       mutex_lock(&rdev->vm_manager.lock);
-       /* free all allocated page tables */
-       list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
-               mutex_lock(&vm->mutex);
-               radeon_vm_free_pt(rdev, vm);
-               mutex_unlock(&vm->mutex);
-       }
-       for (i = 0; i < RADEON_NUM_VM; ++i) {
-               radeon_fence_unref(&rdev->vm_manager.active[i]);
-       }
-       radeon_asic_vm_fini(rdev);
-       mutex_unlock(&rdev->vm_manager.lock);
-
-       radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
-       radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
-       rdev->vm_manager.enabled = false;
-}
-
-/**
- * radeon_vm_evict - evict page table to make room for new one
- *
- * @rdev: radeon_device pointer
- * @vm: VM we want to allocate something for
- *
- * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
- * Returns 0 for success, -ENOMEM for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       struct radeon_vm *vm_evict;
-
-       if (list_empty(&rdev->vm_manager.lru_vm))
-               return -ENOMEM;
-
-       vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
-                                   struct radeon_vm, list);
-       if (vm_evict == vm)
-               return -ENOMEM;
-
-       mutex_lock(&vm_evict->mutex);
-       radeon_vm_free_pt(rdev, vm_evict);
-       mutex_unlock(&vm_evict->mutex);
-       return 0;
-}
-
-/**
- * radeon_vm_alloc_pt - allocates a page table for a VM
- *
- * @rdev: radeon_device pointer
- * @vm: vm to bind
- *
- * Allocate a page table for the requested vm (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       unsigned pd_size, pd_entries, pts_size;
-       struct radeon_ib ib;
-       int r;
-
-       if (vm == NULL) {
-               return -EINVAL;
-       }
-
-       if (vm->page_directory != NULL) {
-               return 0;
-       }
-
-       pd_size = radeon_vm_directory_size(rdev);
-       pd_entries = radeon_vm_num_pdes(rdev);
-
-retry:
-       r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-                            &vm->page_directory, pd_size,
-                            RADEON_VM_PTB_ALIGN_SIZE, false);
-       if (r == -ENOMEM) {
-               r = radeon_vm_evict(rdev, vm);
-               if (r)
-                       return r;
-               goto retry;
-
-       } else if (r) {
-               return r;
-       }
-
-       vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
-
-       /* Initially clear the page directory */
-       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
-                         NULL, pd_entries * 2 + 64);
-       if (r) {
-               radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-               return r;
-       }
-
-       ib.length_dw = 0;
-
-       radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
-                               0, pd_entries, 0, 0);
-
-       radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-       r = radeon_ib_schedule(rdev, &ib, NULL);
-       if (r) {
-               radeon_ib_free(rdev, &ib);
-               radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-               return r;
-       }
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(ib.fence);
-       radeon_ib_free(rdev, &ib);
-       radeon_fence_unref(&vm->last_flush);
-
-       /* allocate page table array */
-       pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
-       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
-
-       if (vm->page_tables == NULL) {
-               DRM_ERROR("Cannot allocate memory for page table array\n");
-               radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-/**
- * radeon_vm_add_to_lru - add VMs page table to LRU list
- *
- * @rdev: radeon_device pointer
- * @vm: vm to add to LRU
- *
- * Add the allocated page table to the LRU list (cayman+).
- *
- * Global mutex must be locked!
- */
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       list_del_init(&vm->list);
-       list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-}
-
-/**
- * radeon_vm_grab_id - allocate the next free VMID
- *
- * @rdev: radeon_device pointer
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
- *
- * Global and local mutex must be locked!
- */
-struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
-                                      struct radeon_vm *vm, int ring)
-{
-       struct radeon_fence *best[RADEON_NUM_RINGS] = {};
-       unsigned choices[2] = {};
-       unsigned i;
-
-       /* check if the id is still valid */
-       if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
-               return NULL;
-
-       /* we definately need to flush */
-       radeon_fence_unref(&vm->last_flush);
-
-       /* skip over VMID 0, since it is the system VM */
-       for (i = 1; i < rdev->vm_manager.nvm; ++i) {
-               struct radeon_fence *fence = rdev->vm_manager.active[i];
-
-               if (fence == NULL) {
-                       /* found a free one */
-                       vm->id = i;
-                       trace_radeon_vm_grab_id(vm->id, ring);
-                       return NULL;
-               }
-
-               if (radeon_fence_is_earlier(fence, best[fence->ring])) {
-                       best[fence->ring] = fence;
-                       choices[fence->ring == ring ? 0 : 1] = i;
-               }
-       }
-
-       for (i = 0; i < 2; ++i) {
-               if (choices[i]) {
-                       vm->id = choices[i];
-                       trace_radeon_vm_grab_id(vm->id, ring);
-                       return rdev->vm_manager.active[choices[i]];
-               }
-       }
-
-       /* should never happen */
-       BUG();
-       return NULL;
-}
-
-/**
- * radeon_vm_fence - remember fence for vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void radeon_vm_fence(struct radeon_device *rdev,
-                    struct radeon_vm *vm,
-                    struct radeon_fence *fence)
-{
-       radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-       rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(fence);
-
-       radeon_fence_unref(&vm->last_id_use);
-       vm->last_id_use = radeon_fence_ref(fence);
-}
-
-/**
- * radeon_vm_bo_find - find the bo_va for a specific vm & bo
- *
- * @vm: requested vm
- * @bo: requested buffer object
- *
- * Find @bo inside the requested vm (cayman+).
- * Search inside the @bos vm list for the requested vm
- * Returns the found bo_va or NULL if none is found
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
-                                      struct radeon_bo *bo)
-{
-       struct radeon_bo_va *bo_va;
-
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               if (bo_va->vm == vm) {
-                       return bo_va;
-               }
-       }
-       return NULL;
-}
-
-/**
- * radeon_vm_bo_add - add a bo to a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- *
- * Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
-                                     struct radeon_vm *vm,
-                                     struct radeon_bo *bo)
-{
-       struct radeon_bo_va *bo_va;
-
-       bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-       if (bo_va == NULL) {
-               return NULL;
-       }
-       bo_va->vm = vm;
-       bo_va->bo = bo;
-       bo_va->soffset = 0;
-       bo_va->eoffset = 0;
-       bo_va->flags = 0;
-       bo_va->valid = false;
-       bo_va->ref_count = 1;
-       INIT_LIST_HEAD(&bo_va->bo_list);
-       INIT_LIST_HEAD(&bo_va->vm_list);
-
-       mutex_lock(&vm->mutex);
-       list_add(&bo_va->vm_list, &vm->va);
-       list_add_tail(&bo_va->bo_list, &bo->va);
-       mutex_unlock(&vm->mutex);
-
-       return bo_va;
-}
-
-/**
- * radeon_vm_bo_set_addr - set bos virtual address inside a vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: bo_va to store the address
- * @soffset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
- *
- * Set offset of @bo_va (cayman+).
- * Validate and set the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
- *
- * Object has to be reserved!
- */
-int radeon_vm_bo_set_addr(struct radeon_device *rdev,
-                         struct radeon_bo_va *bo_va,
-                         uint64_t soffset,
-                         uint32_t flags)
-{
-       uint64_t size = radeon_bo_size(bo_va->bo);
-       uint64_t eoffset, last_offset = 0;
-       struct radeon_vm *vm = bo_va->vm;
-       struct radeon_bo_va *tmp;
-       struct list_head *head;
-       unsigned last_pfn;
-
-       if (soffset) {
-               /* make sure object fit at this offset */
-               eoffset = soffset + size;
-               if (soffset >= eoffset) {
-                       return -EINVAL;
-               }
-
-               last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
-               if (last_pfn > rdev->vm_manager.max_pfn) {
-                       dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
-                               last_pfn, rdev->vm_manager.max_pfn);
-                       return -EINVAL;
-               }
-
-       } else {
-               eoffset = last_pfn = 0;
-       }
-
-       mutex_lock(&vm->mutex);
-       head = &vm->va;
-       last_offset = 0;
-       list_for_each_entry(tmp, &vm->va, vm_list) {
-               if (bo_va == tmp) {
-                       /* skip over currently modified bo */
-                       continue;
-               }
-
-               if (soffset >= last_offset && eoffset <= tmp->soffset) {
-                       /* bo can be added before this one */
-                       break;
-               }
-               if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
-                       /* bo and tmp overlap, invalid offset */
-                       dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
-                               bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
-                               (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
-                       mutex_unlock(&vm->mutex);
-                       return -EINVAL;
-               }
-               last_offset = tmp->eoffset;
-               head = &tmp->vm_list;
-       }
-
-       bo_va->soffset = soffset;
-       bo_va->eoffset = eoffset;
-       bo_va->flags = flags;
-       bo_va->valid = false;
-       list_move(&bo_va->vm_list, head);
-
-       mutex_unlock(&vm->mutex);
-       return 0;
-}
-
-/**
- * radeon_vm_map_gart - get the physical address of a gart page
- *
- * @rdev: radeon_device pointer
- * @addr: the unmapped addr
- *
- * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
- */
-uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
-{
-       uint64_t result;
-
-       /* page table offset */
-       result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-       /* in case cpu page size != gpu page size*/
-       result |= addr & (~PAGE_MASK);
-
-       return result;
-}
-
-/**
- * radeon_vm_page_flags - translate page flags to what the hw uses
- *
- * @flags: flags comming from userspace
- *
- * Translate the flags the userspace ABI uses to hw flags.
- */
-static uint32_t radeon_vm_page_flags(uint32_t flags)
-{
-        uint32_t hw_flags = 0;
-        hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
-        hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-        hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-        if (flags & RADEON_VM_PAGE_SYSTEM) {
-                hw_flags |= R600_PTE_SYSTEM;
-                hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-        }
-        return hw_flags;
-}
-
-/**
- * radeon_vm_update_pdes - make sure that page directory is valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_update_pdes(struct radeon_device *rdev,
-                                struct radeon_vm *vm,
-                                struct radeon_ib *ib,
-                                uint64_t start, uint64_t end)
-{
-       static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
-
-       uint64_t last_pde = ~0, last_pt = ~0;
-       unsigned count = 0;
-       uint64_t pt_idx;
-       int r;
-
-       start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-       end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-
-       /* walk over the address space and update the page directory */
-       for (pt_idx = start; pt_idx <= end; ++pt_idx) {
-               uint64_t pde, pt;
-
-               if (vm->page_tables[pt_idx])
-                       continue;
-
-retry:
-               r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-                                    &vm->page_tables[pt_idx],
-                                    RADEON_VM_PTE_COUNT * 8,
-                                    RADEON_GPU_PAGE_SIZE, false);
-
-               if (r == -ENOMEM) {
-                       r = radeon_vm_evict(rdev, vm);
-                       if (r)
-                               return r;
-                       goto retry;
-               } else if (r) {
-                       return r;
-               }
-
-               pde = vm->pd_gpu_addr + pt_idx * 8;
-
-               pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-
-               if (((last_pde + 8 * count) != pde) ||
-                   ((last_pt + incr * count) != pt)) {
-
-                       if (count) {
-                               radeon_asic_vm_set_page(rdev, ib, last_pde,
-                                                       last_pt, count, incr,
-                                                       R600_PTE_VALID);
-
-                               count *= RADEON_VM_PTE_COUNT;
-                               radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
-                                                       count, 0, 0);
-                       }
-
-                       count = 1;
-                       last_pde = pde;
-                       last_pt = pt;
-               } else {
-                       ++count;
-               }
-       }
-
-       if (count) {
-               radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-                                       incr, R600_PTE_VALID);
-
-               count *= RADEON_VM_PTE_COUNT;
-               radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
-                                       count, 0, 0);
-       }
-
-       return 0;
-}
-
-/**
- * radeon_vm_update_ptes - make sure that page tables are valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
-                                 struct radeon_vm *vm,
-                                 struct radeon_ib *ib,
-                                 uint64_t start, uint64_t end,
-                                 uint64_t dst, uint32_t flags)
-{
-       static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
-
-       uint64_t last_pte = ~0, last_dst = ~0;
-       unsigned count = 0;
-       uint64_t addr;
-
-       start = start / RADEON_GPU_PAGE_SIZE;
-       end = end / RADEON_GPU_PAGE_SIZE;
-
-       /* walk over the address space and update the page tables */
-       for (addr = start; addr < end; ) {
-               uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
-               unsigned nptes;
-               uint64_t pte;
-
-               if ((addr & ~mask) == (end & ~mask))
-                       nptes = end - addr;
-               else
-                       nptes = RADEON_VM_PTE_COUNT - (addr & mask);
-
-               pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-               pte += (addr & mask) * 8;
-
-               if ((last_pte + 8 * count) != pte) {
-
-                       if (count) {
-                               radeon_asic_vm_set_page(rdev, ib, last_pte,
-                                                       last_dst, count,
-                                                       RADEON_GPU_PAGE_SIZE,
-                                                       flags);
-                       }
-
-                       count = nptes;
-                       last_pte = pte;
-                       last_dst = dst;
-               } else {
-                       count += nptes;
-               }
-
-               addr += nptes;
-               dst += nptes * RADEON_GPU_PAGE_SIZE;
-       }
-
-       if (count) {
-               radeon_asic_vm_set_page(rdev, ib, last_pte,
-                                       last_dst, count,
-                                       RADEON_GPU_PAGE_SIZE, flags);
-       }
-}
-
-/**
- * radeon_vm_bo_update - map a bo into the vm page table
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- * @mem: ttm mem
- *
- * Fill in the page table entries for @bo (cayman+).
- * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved & global and local mutex must be locked!
- */
-int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
-                       struct ttm_mem_reg *mem)
-{
-       struct radeon_ib ib;
-       struct radeon_bo_va *bo_va;
-       unsigned nptes, npdes, ndw;
-       uint64_t addr;
-       int r;
-
-       /* nothing to do if vm isn't bound */
-       if (vm->page_directory == NULL)
-               return 0;
-
-       bo_va = radeon_vm_bo_find(vm, bo);
-       if (bo_va == NULL) {
-               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-               return -EINVAL;
-       }
-
-       if (!bo_va->soffset) {
-               dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
-                       bo, vm);
-               return -EINVAL;
-       }
-
-       if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
-               return 0;
-
-       bo_va->flags &= ~RADEON_VM_PAGE_VALID;
-       bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
-       if (mem) {
-               addr = mem->start << PAGE_SHIFT;
-               if (mem->mem_type != TTM_PL_SYSTEM) {
-                       bo_va->flags |= RADEON_VM_PAGE_VALID;
-                       bo_va->valid = true;
-               }
-               if (mem->mem_type == TTM_PL_TT) {
-                       bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
-               } else {
-                       addr += rdev->vm_manager.vram_base_offset;
-               }
-       } else {
-               addr = 0;
-               bo_va->valid = false;
-       }
-
-       trace_radeon_vm_bo_update(bo_va);
-
-       nptes = radeon_bo_ngpu_pages(bo);
-
-       /* assume two extra pdes in case the mapping overlaps the borders */
-       npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
-
-       /* padding, etc. */
-       ndw = 64;
-
-       if (RADEON_VM_BLOCK_SIZE > 11)
-               /* reserve space for one header for every 2k dwords */
-               ndw += (nptes >> 11) * 4;
-       else
-               /* reserve space for one header for
-                   every (1 << BLOCK_SIZE) entries */
-               ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
-
-       /* reserve space for pte addresses */
-       ndw += nptes * 2;
-
-       /* reserve space for one header for every 2k dwords */
-       ndw += (npdes >> 11) * 4;
-
-       /* reserve space for pde addresses */
-       ndw += npdes * 2;
-
-       /* reserve space for clearing new page tables */
-       ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
-
-       /* update too big for an IB */
-       if (ndw > 0xfffff)
-               return -ENOMEM;
-
-       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
-       if (r)
-               return r;
-       ib.length_dw = 0;
-
-       r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
-       if (r) {
-               radeon_ib_free(rdev, &ib);
-               return r;
-       }
-
-       radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
-                             addr, radeon_vm_page_flags(bo_va->flags));
-
-       radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-       r = radeon_ib_schedule(rdev, &ib, NULL);
-       if (r) {
-               radeon_ib_free(rdev, &ib);
-               return r;
-       }
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(ib.fence);
-       radeon_ib_free(rdev, &ib);
-       radeon_fence_unref(&vm->last_flush);
-
-       return 0;
-}
-
-/**
- * radeon_vm_bo_rmv - remove a bo to a specific vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: requested bo_va
- *
- * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
- *
- * Object have to be reserved!
- */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va)
-{
-       int r = 0;
-
-       mutex_lock(&rdev->vm_manager.lock);
-       mutex_lock(&bo_va->vm->mutex);
-       if (bo_va->soffset) {
-               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
-       }
-       mutex_unlock(&rdev->vm_manager.lock);
-       list_del(&bo_va->vm_list);
-       mutex_unlock(&bo_va->vm->mutex);
-       list_del(&bo_va->bo_list);
-
-       kfree(bo_va);
-       return r;
-}
-
-/**
- * radeon_vm_bo_invalidate - mark the bo as invalid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- *
- * Mark @bo as invalid (cayman+).
- */
-void radeon_vm_bo_invalidate(struct radeon_device *rdev,
-                            struct radeon_bo *bo)
-{
-       struct radeon_bo_va *bo_va;
-
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               bo_va->valid = false;
-       }
-}
-
-/**
- * radeon_vm_init - initialize a vm instance
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- *
- * Init @vm fields (cayman+).
- */
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       vm->id = 0;
-       vm->fence = NULL;
-       vm->last_flush = NULL;
-       vm->last_id_use = NULL;
-       mutex_init(&vm->mutex);
-       INIT_LIST_HEAD(&vm->list);
-       INIT_LIST_HEAD(&vm->va);
-}
-
-/**
- * radeon_vm_fini - tear down a vm instance
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- *
- * Tear down @vm (cayman+).
- * Unbind the VM and remove all bos from the vm bo list
- */
-void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       struct radeon_bo_va *bo_va, *tmp;
-       int r;
-
-       mutex_lock(&rdev->vm_manager.lock);
-       mutex_lock(&vm->mutex);
-       radeon_vm_free_pt(rdev, vm);
-       mutex_unlock(&rdev->vm_manager.lock);
-
-       if (!list_empty(&vm->va)) {
-               dev_err(rdev->dev, "still active bo inside vm\n");
-       }
-       list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
-               list_del_init(&bo_va->vm_list);
-               r = radeon_bo_reserve(bo_va->bo, false);
-               if (!r) {
-                       list_del_init(&bo_va->bo_list);
-                       radeon_bo_unreserve(bo_va->bo);
-                       kfree(bo_va);
-               }
-       }
-       radeon_fence_unref(&vm->fence);
-       radeon_fence_unref(&vm->last_flush);
-       radeon_fence_unref(&vm->last_id_use);
-       mutex_unlock(&vm->mutex);
-}
index b96c819024b3cdb7b2ea8f387c1e267a563ed8df..d09650c1d720fc74920577fc176f41f9b076a646 100644 (file)
@@ -344,18 +344,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
-       switch (cur_placement) {
-       case TTM_PL_VRAM:
-               args->domain = RADEON_GEM_DOMAIN_VRAM;
-               break;
-       case TTM_PL_TT:
-               args->domain = RADEON_GEM_DOMAIN_GTT;
-               break;
-       case TTM_PL_SYSTEM:
-               args->domain = RADEON_GEM_DOMAIN_CPU;
-       default:
-               break;
-       }
+       args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
@@ -533,6 +522,42 @@ out:
        return r;
 }
 
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *filp)
+{
+       struct drm_radeon_gem_op *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_bo *robj;
+       int r;
+
+       gobj = drm_gem_object_lookup(dev, filp, args->handle);
+       if (gobj == NULL) {
+               return -ENOENT;
+       }
+       robj = gem_to_radeon_bo(gobj);
+       r = radeon_bo_reserve(robj, false);
+       if (unlikely(r))
+               goto out;
+
+       switch (args->op) {
+       case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
+               args->value = robj->initial_domain;
+               break;
+       case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
+               robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
+                                                     RADEON_GEM_DOMAIN_GTT |
+                                                     RADEON_GEM_DOMAIN_CPU);
+               break;
+       default:
+               r = -EINVAL;
+       }
+
+       radeon_bo_unreserve(robj);
+out:
+       drm_gem_object_unreference_unlocked(gobj);
+       return r;
+}
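
For context, userspace drives this new ioctl roughly as sketched below. This is a hedged example, not part of the patch: the handle/op/value fields mirror the args-> accesses above, and the DRM_RADEON_GEM_OP command index (implied by the DRM_IOCTL_DEF_DRV entry added in radeon_kms.c) plus the struct drm_radeon_gem_op definition are assumed to come from the include/uapi/drm/radeon_drm.h update in this series.

    #include <stdint.h>
    #include <xf86drm.h>      /* drmCommandWriteRead() from libdrm */
    #include <radeon_drm.h>   /* assumed: struct drm_radeon_gem_op, RADEON_GEM_OP_* */

    /* Query the initial placement domain of a BO; returns 0 on success. */
    static int radeon_get_initial_domain(int fd, uint32_t handle, uint64_t *domain)
    {
        struct drm_radeon_gem_op op = {
            .handle = handle,
            .op     = RADEON_GEM_OP_GET_INITIAL_DOMAIN,
        };
        int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &op, sizeof(op));

        if (r == 0)
            *domain = op.value; /* RADEON_GEM_DOMAIN_{VRAM,GTT,CPU} bits */
        return r;
    }
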
+
 int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
index baff98be65b1ea9e0b0f068e91f31d7245a43c69..37b1deacb5b1674871e1842adf64a10fc430f0c6 100644 (file)
@@ -486,6 +486,21 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
        case RADEON_INFO_VCE_FB_VERSION:
                *value = rdev->vce.fb_version;
                break;
+       case RADEON_INFO_NUM_BYTES_MOVED:
+               value = (uint32_t*)&value64;
+               value_size = sizeof(uint64_t);
+               value64 = atomic64_read(&rdev->num_bytes_moved);
+               break;
+       case RADEON_INFO_VRAM_USAGE:
+               value = (uint32_t*)&value64;
+               value_size = sizeof(uint64_t);
+               value64 = atomic64_read(&rdev->vram_usage);
+               break;
+       case RADEON_INFO_GTT_USAGE:
+               value = (uint32_t*)&value64;
+               value_size = sizeof(uint64_t);
+               value64 = atomic64_read(&rdev->gtt_usage);
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
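
All three new requests hand back a 64-bit value through the usual drm_radeon_info user pointer (value_size switches the copy to 8 bytes). A hedged userspace sketch, assuming the RADEON_INFO_* names exported by the radeon_drm.h update in this series:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Read the driver-tracked VRAM usage in bytes; returns 0 on success. */
    static int radeon_query_vram_usage(int fd, uint64_t *usage)
    {
        struct drm_radeon_info info = {
            .request = RADEON_INFO_VRAM_USAGE,
            .value   = (uintptr_t)usage, /* kernel writes a uint64_t here */
        };

        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    }
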
@@ -544,7 +559,13 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               radeon_vm_init(rdev, &fpriv->vm);
+               r = radeon_vm_init(rdev, &fpriv->vm);
+               if (r)
+                       return r;
+
+               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+               if (r)
+                       return r;
 
                /* map the ib pool buffer read only into
                 * virtual address space */
@@ -553,6 +574,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
                                          RADEON_VM_PAGE_READABLE |
                                          RADEON_VM_PAGE_SNOOPED);
+
+               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                if (r) {
                        radeon_vm_fini(rdev, &fpriv->vm);
                        kfree(fpriv);
@@ -814,5 +837,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
index 08595cf90b0139ee0da24f4ac3725e30dcbf9d68..ca79431b2c1cb0689cfdef39225a5e61f5a6f355 100644 (file)
@@ -56,11 +56,36 @@ static void radeon_bo_clear_va(struct radeon_bo *bo)
        }
 }
 
+static void radeon_update_memory_usage(struct radeon_bo *bo,
+                                      unsigned mem_type, int sign)
+{
+       struct radeon_device *rdev = bo->rdev;
+       u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
+
+       switch (mem_type) {
+       case TTM_PL_TT:
+               if (sign > 0)
+                       atomic64_add(size, &rdev->gtt_usage);
+               else
+                       atomic64_sub(size, &rdev->gtt_usage);
+               break;
+       case TTM_PL_VRAM:
+               if (sign > 0)
+                       atomic64_add(size, &rdev->vram_usage);
+               else
+                       atomic64_sub(size, &rdev->vram_usage);
+               break;
+       }
+}
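
A hedged reading of the accounting: only TT and VRAM placements are counted (system/CPU placement falls through the switch unchanged), radeon_bo_move_notify() further down subtracts the old placement and adds the new one on every TTM move, and radeon_ttm_bo_destroy() subtracts the final placement. A BO therefore contributes its size to at most one of gtt_usage/vram_usage at any time; these are the same atomic64 counters surfaced by the new RADEON_INFO_VRAM_USAGE/GTT_USAGE requests and consumed by radeon_bo_get_threshold_for_moves() below.
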
+
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct radeon_bo *bo;
 
        bo = container_of(tbo, struct radeon_bo, tbo);
+
+       radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
@@ -145,6 +170,9 @@ int radeon_bo_create(struct radeon_device *rdev,
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
+       bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
+                                      RADEON_GEM_DOMAIN_GTT |
+                                      RADEON_GEM_DOMAIN_CPU);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
@@ -338,39 +366,105 @@ void radeon_bo_fini(struct radeon_device *rdev)
        arch_phys_wc_del(rdev->mc.vram_mtrr);
 }
 
-void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
-                               struct list_head *head)
+/* Returns how many bytes TTM can move per IB.
+ */
+static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
 {
-       if (lobj->written) {
-               list_add(&lobj->tv.head, head);
-       } else {
-               list_add_tail(&lobj->tv.head, head);
-       }
+       u64 real_vram_size = rdev->mc.real_vram_size;
+       u64 vram_usage = atomic64_read(&rdev->vram_usage);
+
+       /* This function is based on the current VRAM usage.
+        *
+        * - If all of VRAM is free, allow relocating the number of bytes that
+        *   is equal to 1/4 of the size of VRAM for this IB.
+        *
+        * - If more than one half of VRAM is occupied, only allow relocating
+        *   1 MB of data for this IB.
+        *
+        * - From 0 to one half of used VRAM, the threshold decreases
+        *   linearly.
+        *         __________________
+        * 1/4 of -|\               |
+        * VRAM    | \              |
+        *         |  \             |
+        *         |   \            |
+        *         |    \           |
+        *         |     \          |
+        *         |      \         |
+        *         |       \________|1 MB
+        *         |----------------|
+        *    VRAM 0 %             100 %
+        *         used            used
+        *
+        * Note: It's a threshold, not a limit. The threshold must be crossed
+        * for buffer relocations to stop, so any buffer of an arbitrary size
+        * can be moved as long as the threshold isn't crossed before
+        * the relocation takes place. We don't want to disable buffer
+        * relocations completely.
+        *
+        * The idea is that buffers should be placed in VRAM at creation time
+        * and TTM should only do a minimum number of relocations during
+        * command submission. In practice, you need to submit at least
+        * a dozen IBs to move all buffers to VRAM if they are in GTT.
+        *
+        * Also, things can get pretty crazy under memory pressure and actual
+        * VRAM usage can change a lot, so playing it safe even at 50% usage
+        * consistently increases performance.
+        */
+
+       u64 half_vram = real_vram_size >> 1;
+       u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
+       u64 bytes_moved_threshold = half_free_vram >> 1;
+       return max(bytes_moved_threshold, 1024*1024ull);
 }
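
Worked example with illustrative numbers (not taken from the patch): for real_vram_size = 1 GiB and vram_usage = 256 MiB, half_vram = 512 MiB and half_free_vram = 256 MiB, so the per-IB threshold is 128 MiB. At zero usage the threshold is 256 MiB, i.e. 1/4 of VRAM; once usage reaches 512 MiB, half_free_vram is 0 and max() clamps the threshold to the 1 MiB floor, matching the graph above.
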
 
-int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+int radeon_bo_list_validate(struct radeon_device *rdev,
+                           struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
 {
-       struct radeon_bo_list *lobj;
+       struct radeon_cs_reloc *lobj;
        struct radeon_bo *bo;
-       u32 domain;
        int r;
+       u64 bytes_moved = 0, initial_bytes_moved;
+       u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
        r = ttm_eu_reserve_buffers(ticket, head);
        if (unlikely(r != 0)) {
                return r;
        }
+
        list_for_each_entry(lobj, head, tv.head) {
-               bo = lobj->bo;
+               bo = lobj->robj;
                if (!bo->pin_count) {
-                       domain = lobj->domain;
-                       
+                       u32 domain = lobj->domain;
+                       u32 current_domain =
+                               radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+                       /* Check if this buffer will be moved and don't move it
+                        * if we have moved too many buffers for this IB already.
+                        *
+                        * Note that this allows moving at least one buffer of
+                        * any size, because it doesn't take the current "bo"
+                        * into account. We don't want to disallow buffer moves
+                        * completely.
+                        */
+                       if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+                           (domain & current_domain) == 0 && /* will be moved */
+                           bytes_moved > bytes_moved_threshold) {
+                               /* don't move it */
+                               domain = current_domain;
+                       }
+
                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo);
-                       r = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                               true, false);
+
+                       initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+                       bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
+                                      initial_bytes_moved;
+
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
                                        domain = lobj->alt_domain;
@@ -564,14 +658,23 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-                          struct ttm_mem_reg *mem)
+                          struct ttm_mem_reg *new_mem)
 {
        struct radeon_bo *rbo;
+
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
+
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);
+
+       /* update statistics */
+       if (!new_mem)
+               return;
+
+       radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+       radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
index 209b1115026379dccef518bf60ab9fa68217d9ab..9e7b25a0629d3a249720628aa697b9ff8a0f4a6f 100644 (file)
@@ -138,9 +138,8 @@ extern int radeon_bo_evict_vram(struct radeon_device *rdev);
 extern void radeon_bo_force_delete(struct radeon_device *rdev);
 extern int radeon_bo_init(struct radeon_device *rdev);
 extern void radeon_bo_fini(struct radeon_device *rdev);
-extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
-                               struct list_head *head);
-extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+extern int radeon_bo_list_validate(struct radeon_device *rdev,
+                                  struct ww_acquire_ctx *ticket,
                                   struct list_head *head, int ring);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                                struct vm_area_struct *vma);
@@ -151,7 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-                                       struct ttm_mem_reg *mem);
+                                 struct ttm_mem_reg *new_mem);
 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 
@@ -181,7 +180,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
 extern int radeon_sa_bo_new(struct radeon_device *rdev,
                            struct radeon_sa_manager *sa_manager,
                            struct radeon_sa_bo **sa_bo,
-                           unsigned size, unsigned align, bool block);
+                           unsigned size, unsigned align);
 extern void radeon_sa_bo_free(struct radeon_device *rdev,
                              struct radeon_sa_bo **sa_bo,
                              struct radeon_fence *fence);
index 4ad9af9fc517cffeac548c9dd83d54d3633a192c..ee738a524639e41e75c7af5279c583e4b3c7ee10 100644 (file)
@@ -260,7 +260,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
                if (!ring->ready) {
                        continue;
                }
-               r = radeon_fence_wait_empty_locked(rdev, i);
+               r = radeon_fence_wait_empty(rdev, i);
                if (r) {
                        /* needs a GPU reset dont reset here */
                        mutex_unlock(&rdev->ring_lock);
@@ -896,7 +896,7 @@ force:
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
                struct radeon_ring *ring = &rdev->ring[i];
                if (ring->ready)
-                       radeon_fence_wait_empty_locked(rdev, i);
+                       radeon_fence_wait_empty(rdev, i);
        }
 
        /* program the new power state */
@@ -943,8 +943,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
                if (enable) {
                        mutex_lock(&rdev->pm.mutex);
                        rdev->pm.dpm.uvd_active = true;
-                       /* disable this for now */
-#if 0
                        if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
                        else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -954,7 +952,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
                        else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
                        else
-#endif
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
                        rdev->pm.dpm.state = dpm_state;
                        mutex_unlock(&rdev->pm.mutex);
index b14c86d57607f8e26d444f26de3c941ca813194a..8b0dfdd23793abcf1646f04d5611bf3ec3ad837f 100644 (file)
@@ -63,7 +63,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 {
        int r;
 
-       r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
+       r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
        if (r) {
                dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
                return r;
@@ -145,6 +145,13 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                return r;
        }
 
+       /* grab a vm id if necessary */
+       if (ib->vm) {
+               struct radeon_fence *vm_id_fence;
+               vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+               radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+       }
+
        /* sync with other rings */
        r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
        if (r) {
@@ -153,11 +160,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                return r;
        }
 
-       /* if we can't remember our last VM flush then flush now! */
-       /* XXX figure out why we have to flush for every IB */
-       if (ib->vm /*&& !ib->vm->last_flush*/) {
-               radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
-       }
+       if (ib->vm)
+               radeon_vm_flush(rdev, ib->vm, ib->ring);
+
        if (const_ib) {
                radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
                radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -172,10 +177,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
        if (const_ib) {
                const_ib->fence = radeon_fence_ref(ib->fence);
        }
-       /* we just flushed the VM, remember that */
-       if (ib->vm && !ib->vm->last_flush) {
-               ib->vm->last_flush = radeon_fence_ref(ib->fence);
-       }
+
+       if (ib->vm)
+               radeon_vm_fence(rdev, ib->vm, ib->fence);
+
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
 }
@@ -382,7 +387,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
                if (ndw < ring->ring_free_dw) {
                        break;
                }
-               r = radeon_fence_wait_next_locked(rdev, ring->idx);
+               r = radeon_fence_wait_next(rdev, ring->idx);
                if (r)
                        return r;
        }
@@ -485,8 +490,8 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin
 void radeon_ring_lockup_update(struct radeon_device *rdev,
                               struct radeon_ring *ring)
 {
-       ring->last_rptr = radeon_ring_get_rptr(rdev, ring);
-       ring->last_activity = jiffies;
+       atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
+       atomic64_set(&ring->last_activity, jiffies_64);
 }
 
 /**
@@ -498,22 +503,19 @@ void radeon_ring_lockup_update(struct radeon_device *rdev,
 bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
-       unsigned long cjiffies, elapsed;
+       uint64_t last = atomic64_read(&ring->last_activity);
+       uint64_t elapsed;
 
-       cjiffies = jiffies;
-       if (!time_after(cjiffies, ring->last_activity)) {
-               /* likely a wrap around */
+       if (rptr != atomic_read(&ring->last_rptr)) {
+               /* ring is still working, no lockup */
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
-       if (rptr != ring->last_rptr) {
-               /* CP is still working no lockup */
-               radeon_ring_lockup_update(rdev, ring);
-               return false;
-       }
-       elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+
+       elapsed = jiffies_to_msecs(jiffies_64 - last);
        if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
-               dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+               dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
+                       ring->idx, elapsed);
                return true;
        }
        /* give a chance to the GPU ... */
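
The switch from unsigned long jiffies to jiffies_64 is what lets the old "likely a wrap around" branch disappear: with 32-bit jiffies at HZ=1000 the counter wraps after 2^32 ms, roughly 49.7 days, while a 64-bit counter effectively never wraps, so the elapsed time becomes a plain subtraction. Making last_rptr/last_activity atomic also allows radeon_ring_lockup_update() to be called without the caller serializing on the ring (a hedged reading of the change, consistent with the hunks above).
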
index c0625805cdd769b826d0605141f6cc80ce80ad72..adcf3e2f07da5ac10adc84a9c6c6d85b631ff87d 100644 (file)
@@ -312,7 +312,7 @@ static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
 int radeon_sa_bo_new(struct radeon_device *rdev,
                     struct radeon_sa_manager *sa_manager,
                     struct radeon_sa_bo **sa_bo,
-                    unsigned size, unsigned align, bool block)
+                    unsigned size, unsigned align)
 {
        struct radeon_fence *fences[RADEON_NUM_RINGS];
        unsigned tries[RADEON_NUM_RINGS];
@@ -353,14 +353,11 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
                r = radeon_fence_wait_any(rdev, fences, false);
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for block */
-               if (r == -ENOENT && block) {
+               if (r == -ENOENT) {
                        r = wait_event_interruptible_locked(
                                sa_manager->wq, 
                                radeon_sa_event(sa_manager, size, align)
                        );
-
-               } else if (r == -ENOENT) {
-                       r = -ENOMEM;
                }
 
        } while (!r);
index 9006b32d5eed0433d336ef56e1bfb17c6fffa0de..dbd6bcde92de412a87379d7da23a4cea74b40649 100644 (file)
@@ -42,7 +42,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
                return -ENOMEM;
        }
        r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
-                            8 * RADEON_NUM_SYNCS, 8, true);
+                            8 * RADEON_NUM_SYNCS, 8);
        if (r) {
                kfree(*semaphore);
                *semaphore = NULL;
@@ -147,7 +147,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 
                if (++count > RADEON_NUM_SYNCS) {
                        /* not enough room, wait manually */
-                       radeon_fence_wait_locked(fence);
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
                        continue;
                }
 
@@ -161,7 +163,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
                if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
                        /* signaling wasn't successful wait manually */
                        radeon_ring_undo(&rdev->ring[i]);
-                       radeon_fence_wait_locked(fence);
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
                        continue;
                }
 
@@ -169,7 +173,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
                if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
                        /* waiting wasn't successful wait manually */
                        radeon_ring_undo(&rdev->ring[i]);
-                       radeon_fence_wait_locked(fence);
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
                        continue;
                }
 
index 77f5b0c3edb8d8b1f4835620d626c3981171c7a6..60dfce889ecfb3eac52f79b784b42dfcb68d8f84 100644 (file)
@@ -406,8 +406,14 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        if (r) {
 memcpy:
                r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               if (r) {
+                       return r;
+               }
        }
-       return r;
+
+       /* update statistics */
+       atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+       return 0;
 }
 
 static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
index 6781fee1eaadc21a68e50de696dd353be0a3e9e5..6a2e3ff683748a83ba69dab5edd0e88fc4e469cb 100644 (file)
@@ -453,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
        }
 
        reloc = p->relocs_ptr[(idx / 4)];
-       start = reloc->lobj.gpu_offset;
+       start = reloc->gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;
 
@@ -805,8 +805,7 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
                    (rdev->pm.dpm.hd != hd)) {
                        rdev->pm.dpm.sd = sd;
                        rdev->pm.dpm.hd = hd;
-                       /* disable this for now */
-                       /*streams_changed = true;*/
+                       streams_changed = true;
                }
        }
 
index d130432e313a41345759c29fcc0f5b51d07e2c71..76e9904bc5377c23ea33c842137266ed18d560e8 100644 (file)
@@ -119,7 +119,7 @@ int radeon_vce_init(struct radeon_device *rdev)
        if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8)))
                return -EINVAL;
 
-       /* load firmware into VRAM */
+       /* allocate firmware, stack and heap BO */
 
        size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
               RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
@@ -130,16 +130,21 @@ int radeon_vce_init(struct radeon_device *rdev)
                return r;
        }
 
-       r = radeon_vce_resume(rdev);
-       if (r)
+       r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
+       if (r) {
+               radeon_bo_unref(&rdev->vce.vcpu_bo);
+               dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
                return r;
+       }
 
-       memset(rdev->vce.cpu_addr, 0, size);
-       memcpy(rdev->vce.cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
-
-       r = radeon_vce_suspend(rdev);
-       if (r)
+       r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+                         &rdev->vce.gpu_addr);
+       radeon_bo_unreserve(rdev->vce.vcpu_bo);
+       if (r) {
+               radeon_bo_unref(&rdev->vce.vcpu_bo);
+               dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
                return r;
+       }
 
        for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
                atomic_set(&rdev->vce.handles[i], 0);
@@ -158,8 +163,12 @@ int radeon_vce_init(struct radeon_device *rdev)
  */
 void radeon_vce_fini(struct radeon_device *rdev)
 {
-       radeon_vce_suspend(rdev);
+       if (rdev->vce.vcpu_bo == NULL)
+               return;
+
        radeon_bo_unref(&rdev->vce.vcpu_bo);
+
+       release_firmware(rdev->vce_fw);
 }
 
 /**
@@ -167,22 +176,23 @@ void radeon_vce_fini(struct radeon_device *rdev)
  *
  * @rdev: radeon_device pointer
  *
- * TODO: Test VCE suspend/resume
  */
 int radeon_vce_suspend(struct radeon_device *rdev)
 {
-       int r;
+       int i;
 
        if (rdev->vce.vcpu_bo == NULL)
                return 0;
 
-       r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
-       if (!r) {
-               radeon_bo_kunmap(rdev->vce.vcpu_bo);
-               radeon_bo_unpin(rdev->vce.vcpu_bo);
-               radeon_bo_unreserve(rdev->vce.vcpu_bo);
-       }
-       return r;
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
+               if (atomic_read(&rdev->vce.handles[i]))
+                       break;
+
+       if (i == RADEON_MAX_VCE_HANDLES)
+               return 0;
+
+       /* TODO: suspending running encoding sessions isn't supported */
+       return -EINVAL;
 }
 
 /**
@@ -190,10 +200,10 @@ int radeon_vce_suspend(struct radeon_device *rdev)
  *
  * @rdev: radeon_device pointer
  *
- * TODO: Test VCE suspend/resume
  */
 int radeon_vce_resume(struct radeon_device *rdev)
 {
+       void *cpu_addr;
        int r;
 
        if (rdev->vce.vcpu_bo == NULL)
@@ -201,26 +211,21 @@ int radeon_vce_resume(struct radeon_device *rdev)
 
        r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
        if (r) {
-               radeon_bo_unref(&rdev->vce.vcpu_bo);
                dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
                return r;
        }
 
-       r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
-                         &rdev->vce.gpu_addr);
+       r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->vce.vcpu_bo);
-               radeon_bo_unref(&rdev->vce.vcpu_bo);
-               dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
-               return r;
-       }
-
-       r = radeon_bo_kmap(rdev->vce.vcpu_bo, &rdev->vce.cpu_addr);
-       if (r) {
                dev_err(rdev->dev, "(%d) VCE map failed\n", r);
                return r;
        }
 
+       memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
+
+       radeon_bo_kunmap(rdev->vce.vcpu_bo);
+
        radeon_bo_unreserve(rdev->vce.vcpu_bo);
 
        return 0;
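
Net effect of the VCE rework as visible in these hunks: the vcpu BO is now pinned once at init time and stays pinned until fini, resume re-uploads the firmware image through a temporary kmap instead of keeping a persistent cpu_addr mapping, and suspend simply returns -EINVAL while any encode handle is still open (per the TODO above) rather than unpinning a live engine.
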
@@ -456,7 +461,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
                return -EINVAL;
        }
 
-       offset += p->relocs_ptr[(idx / 4)]->lobj.gpu_offset;
+       offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
 
         p->ib.ptr[lo] = offset & 0xFFFFFFFF;
         p->ib.ptr[hi] = offset >> 32;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
new file mode 100644 (file)
index 0000000..2aae6ce
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time.  The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID.  When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer.  VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+       return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+       return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
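
To get a feel for the numbers behind these two helpers: the page directory holds one 8-byte entry per block of 1 << RADEON_VM_BLOCK_SIZE GPU pages, rounded up to a whole GPU page. The following standalone sketch reproduces that math; the max_pfn, block size and page size values are illustrative assumptions, not taken from radeon.h.

#include <stdio.h>
#include <stdint.h>

#define RADEON_VM_BLOCK_SIZE 9          /* assumed: 512 PTEs per page table */
#define RADEON_GPU_PAGE_SIZE 4096
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t max_pfn = 1ULL << 20;  /* assumed VM size: 1M GPU pages = 4 GiB */
        uint64_t num_pdes = max_pfn >> RADEON_VM_BLOCK_SIZE;
        /* one 8-byte entry per PDE, padded to a full GPU page */
        uint64_t dir_size = ALIGN_UP(num_pdes * 8, RADEON_GPU_PAGE_SIZE);

        printf("%llu PDEs, %llu byte page directory\n",
               (unsigned long long)num_pdes, (unsigned long long)dir_size);
        return 0;
}
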
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (!rdev->vm_manager.enabled) {
+               r = radeon_asic_vm_init(rdev);
+               if (r)
+                       return r;
+
+               rdev->vm_manager.enabled = true;
+       }
+       return 0;
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+       int i;
+
+       if (!rdev->vm_manager.enabled)
+               return;
+
+       for (i = 0; i < RADEON_NUM_VM; ++i)
+               radeon_fence_unref(&rdev->vm_manager.active[i]);
+       radeon_asic_vm_fini(rdev);
+       rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_get_bos - add the vm BOs to a validation list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm providing the BOs
+ * @head: head of validation list
+ *
+ * Add the page directory to the list of BOs to
+ * validate for command submission (cayman+).
+ */
+struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+                                         struct radeon_vm *vm,
+                                         struct list_head *head)
+{
+       struct radeon_cs_reloc *list;
+       unsigned i, idx, size;
+
+       size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
+       list = kmalloc(size, GFP_KERNEL);
+       if (!list)
+               return NULL;
+
+       /* add the vm page table to the list */
+       list[0].gobj = NULL;
+       list[0].robj = vm->page_directory;
+       list[0].domain = RADEON_GEM_DOMAIN_VRAM;
+       list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+       list[0].tv.bo = &vm->page_directory->tbo;
+       list[0].tiling_flags = 0;
+       list[0].handle = 0;
+       list_add(&list[0].tv.head, head);
+
+       for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
+               if (!vm->page_tables[i].bo)
+                       continue;
+
+               list[idx].gobj = NULL;
+               list[idx].robj = vm->page_tables[i].bo;
+               list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
+               list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+               list[idx].tv.bo = &list[idx].robj->tbo;
+               list[idx].tiling_flags = 0;
+               list[idx].handle = 0;
+               list_add(&list[idx++].tv.head, head);
+       }
+
+       return list;
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+                                      struct radeon_vm *vm, int ring)
+{
+       struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+       unsigned choices[2] = {};
+       unsigned i;
+
+       /* check if the id is still valid */
+       if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
+               return NULL;
+
+       /* we definitely need to flush */
+       radeon_fence_unref(&vm->last_flush);
+
+       /* skip over VMID 0, since it is the system VM */
+       for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+               struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+               if (fence == NULL) {
+                       /* found a free one */
+                       vm->id = i;
+                       trace_radeon_vm_grab_id(vm->id, ring);
+                       return NULL;
+               }
+
+               if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+                       best[fence->ring] = fence;
+                       choices[fence->ring == ring ? 0 : 1] = i;
+               }
+       }
+
+       for (i = 0; i < 2; ++i) {
+               if (choices[i]) {
+                       vm->id = choices[i];
+                       trace_radeon_vm_grab_id(vm->id, ring);
+                       return rdev->vm_manager.active[choices[i]];
+               }
+       }
+
+       /* should never happen */
+       BUG();
+       return NULL;
+}
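
The selection heuristic above can be modeled without the fence machinery: prefer a free id; otherwise evict the id whose last-use fence is oldest, preferring one last used on the ring we are submitting to (no inter-ring sync needed then). A simplified standalone model, with monotonically increasing sequence numbers standing in for radeon fences:

#include <stdio.h>

#define NUM_VM    8     /* assumed: cayman-style limit */
#define NUM_RINGS 2

/* last_use[i] is the sequence number that last used VMID i; 0 means free */
static int pick_vmid(const unsigned long long last_use[NUM_VM],
                     const int last_ring[NUM_VM], int ring)
{
        unsigned long long best[NUM_RINGS] = { 0 };
        int choice[2] = { 0, 0 };       /* [0]: same ring, [1]: other ring */
        int i;

        for (i = 1; i < NUM_VM; ++i) {  /* id 0 is the system VM */
                if (!last_use[i])
                        return i;       /* free id, no sync needed */
                if (!best[last_ring[i]] || last_use[i] < best[last_ring[i]]) {
                        best[last_ring[i]] = last_use[i];
                        choice[last_ring[i] == ring ? 0 : 1] = i;
                }
        }
        /* prefer an id last used on our own ring (the kernel BUG()s
         * if neither choice exists) */
        return choice[0] ? choice[0] : choice[1];
}

int main(void)
{
        unsigned long long last_use[NUM_VM] = { 1, 5, 9, 3, 7, 2, 8, 6 };
        int last_ring[NUM_VM]              = { 0, 0, 1, 0, 1, 1, 0, 0 };

        printf("victim VMID: %d\n", pick_vmid(last_use, last_ring, 0));
        return 0;
}
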
+
+/**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    int ring)
+{
+       uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+
+       /* if we can't remember our last VM flush then flush now! */
+       /* XXX figure out why we have to flush all the time */
+       if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+               vm->pd_gpu_addr = pd_addr;
+               radeon_ring_vm_flush(rdev, ring, vm);
+       }
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_fence *fence)
+{
+       radeon_fence_unref(&vm->fence);
+       vm->fence = radeon_fence_ref(fence);
+
+       radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+       rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+       radeon_fence_unref(&vm->last_id_use);
+       vm->last_id_use = radeon_fence_ref(fence);
+
+       /* we just flushed the VM, remember that */
+       if (!vm->last_flush)
+               vm->last_flush = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bo's vm list for the requested vm.
+ * Returns the found bo_va or NULL if none is found.
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+                                      struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       list_for_each_entry(bo_va, &bo->va, bo_list) {
+               if (bo_va->vm == vm) {
+                       return bo_va;
+               }
+       }
+       return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+                                     struct radeon_vm *vm,
+                                     struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+       if (bo_va == NULL) {
+               return NULL;
+       }
+       bo_va->vm = vm;
+       bo_va->bo = bo;
+       bo_va->soffset = 0;
+       bo_va->eoffset = 0;
+       bo_va->flags = 0;
+       bo_va->valid = false;
+       bo_va->ref_count = 1;
+       INIT_LIST_HEAD(&bo_va->bo_list);
+       INIT_LIST_HEAD(&bo_va->vm_list);
+
+       mutex_lock(&vm->mutex);
+       list_add(&bo_va->vm_list, &vm->va);
+       list_add_tail(&bo_va->bo_list, &bo->va);
+       mutex_unlock(&vm->mutex);
+
+       return bo_va;
+}
+
+/**
+ * radeon_vm_clear_bo - initially clear the page dir/table
+ *
+ * @rdev: radeon_device pointer
+ * @bo: bo to clear
+ *
+ * Returns 0 for success, error for failure.
+ */
+static int radeon_vm_clear_bo(struct radeon_device *rdev,
+                             struct radeon_bo *bo)
+{
+       struct ttm_validate_buffer tv;
+       struct ww_acquire_ctx ticket;
+       struct list_head head;
+       struct radeon_ib ib;
+       unsigned entries;
+       uint64_t addr;
+       int r;
+
+       memset(&tv, 0, sizeof(tv));
+       tv.bo = &bo->tbo;
+
+       INIT_LIST_HEAD(&head);
+       list_add(&tv.head, &head);
+
+       r = ttm_eu_reserve_buffers(&ticket, &head);
+       if (r)
+               return r;
+
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       if (r)
+               goto error;
+
+       addr = radeon_bo_gpu_offset(bo);
+       entries = radeon_bo_size(bo) / 8;
+
+       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+                         NULL, entries * 2 + 64);
+       if (r)
+               goto error;
+
+       ib.length_dw = 0;
+
+       radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r)
+               goto error;
+
+       ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+       radeon_ib_free(rdev, &ib);
+
+       return 0;
+
+error:
+       ttm_eu_backoff_reservation(&ticket, &head);
+       return r;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+                         struct radeon_bo_va *bo_va,
+                         uint64_t soffset,
+                         uint32_t flags)
+{
+       uint64_t size = radeon_bo_size(bo_va->bo);
+       uint64_t eoffset, last_offset = 0;
+       struct radeon_vm *vm = bo_va->vm;
+       struct radeon_bo_va *tmp;
+       struct list_head *head;
+       unsigned last_pfn, pt_idx;
+       int r;
+
+       if (soffset) {
+               /* make sure the object fits at this offset */
+               eoffset = soffset + size;
+               if (soffset >= eoffset) {
+                       return -EINVAL;
+               }
+
+               last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+               if (last_pfn > rdev->vm_manager.max_pfn) {
+                       dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+                               last_pfn, rdev->vm_manager.max_pfn);
+                       return -EINVAL;
+               }
+
+       } else {
+               eoffset = last_pfn = 0;
+       }
+
+       mutex_lock(&vm->mutex);
+       head = &vm->va;
+       last_offset = 0;
+       list_for_each_entry(tmp, &vm->va, vm_list) {
+               if (bo_va == tmp) {
+                       /* skip over currently modified bo */
+                       continue;
+               }
+
+               if (soffset >= last_offset && eoffset <= tmp->soffset) {
+                       /* bo can be added before this one */
+                       break;
+               }
+               if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+                       /* bo and tmp overlap, invalid offset */
+                       dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+                               bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+                               (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+                       mutex_unlock(&vm->mutex);
+                       return -EINVAL;
+               }
+               last_offset = tmp->eoffset;
+               head = &tmp->vm_list;
+       }
+
+       bo_va->soffset = soffset;
+       bo_va->eoffset = eoffset;
+       bo_va->flags = flags;
+       bo_va->valid = false;
+       list_move(&bo_va->vm_list, head);
+
+       soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+       eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+       if (eoffset > vm->max_pde_used)
+               vm->max_pde_used = eoffset;
+
+       radeon_bo_unreserve(bo_va->bo);
+
+       /* walk over the address space and allocate the page tables */
+       for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
+               struct radeon_bo *pt;
+
+               if (vm->page_tables[pt_idx].bo)
+                       continue;
+
+               /* drop mutex to allocate and clear page table */
+               mutex_unlock(&vm->mutex);
+
+               r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
+                                    RADEON_GPU_PAGE_SIZE, false,
+                                    RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+               if (r)
+                       return r;
+
+               r = radeon_vm_clear_bo(rdev, pt);
+               if (r) {
+                       radeon_bo_unref(&pt);
+                       radeon_bo_reserve(bo_va->bo, false);
+                       return r;
+               }
+
+               /* acquire mutex again */
+               mutex_lock(&vm->mutex);
+               if (vm->page_tables[pt_idx].bo) {
+                       /* someone else allocated the pt in the meantime */
+                       mutex_unlock(&vm->mutex);
+                       radeon_bo_unref(&pt);
+                       mutex_lock(&vm->mutex);
+                       continue;
+               }
+
+               vm->page_tables[pt_idx].addr = 0;
+               vm->page_tables[pt_idx].bo = pt;
+       }
+
+       mutex_unlock(&vm->mutex);
+       return radeon_bo_reserve(bo_va->bo, false);
+}
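
The shifts above determine which page-table slots a mapping touches, and hence which page-table BOs the allocation loop has to create. A standalone sketch of that index math, with the page size, block size and mapping range chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_VM_BLOCK_SIZE 9          /* assumed: 512 PTEs per page table */

int main(void)
{
        uint64_t soffset = 16ULL << 20; /* assumed mapping: 16 MiB .. 20 MiB */
        uint64_t eoffset = 20ULL << 20;

        uint64_t first_pt = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
        uint64_t last_pt  = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

        /* radeon_vm_bo_set_addr allocates one page-table bo per index here */
        printf("page tables %llu..%llu need to exist\n",
               (unsigned long long)first_pt, (unsigned long long)last_pt);
        return 0;
}
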
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+       uint64_t result;
+
+       /* page table offset */
+       result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+       /* in case cpu page size != gpu page size*/
+       result |= addr & (~PAGE_MASK);
+
+       return result;
+}
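
The translation simply combines the DMA address recorded for the CPU page with the offset of the GPU page inside it. A standalone model of that masking, where PAGE_SHIFT and the table contents are assumptions of the sketch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))

int main(void)
{
        uint64_t pages_addr[] = { 0x80000000, 0x9abcd000 };  /* assumed DMA addrs */
        uint64_t addr = ((uint64_t)1 << PAGE_SHIFT) + 0x123; /* page 1, offset 0x123 */

        uint64_t result = pages_addr[addr >> PAGE_SHIFT];    /* page table lookup */
        result |= addr & ~PAGE_MASK;    /* keep offset: cpu page != gpu page size */

        printf("0x%llx -> 0x%llx\n",
               (unsigned long long)addr, (unsigned long long)result);
        return 0;
}
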
+
+/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+       uint32_t hw_flags = 0;
+
+       hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+       hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+       hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+       if (flags & RADEON_VM_PAGE_SYSTEM) {
+               hw_flags |= R600_PTE_SYSTEM;
+               hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+       }
+       return hw_flags;
+}
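
As a quick sanity check of the translation, a snooped system page carries the VALID, READABLE, WRITEABLE, SYSTEM and SNOOPED userspace bits, and should come out with the matching R600 PTE bits set. The bit positions below are illustrative assumptions; the real values live in radeon_drm.h and radeon.h.

#include <stdio.h>
#include <stdint.h>

/* illustrative values only, not the real header definitions */
#define VM_PAGE_VALID     (1u << 0)
#define VM_PAGE_READABLE  (1u << 1)
#define VM_PAGE_WRITEABLE (1u << 2)
#define VM_PAGE_SYSTEM    (1u << 3)
#define VM_PAGE_SNOOPED   (1u << 4)

#define PTE_VALID     (1u << 0)
#define PTE_SYSTEM    (1u << 1)
#define PTE_SNOOPED   (1u << 2)
#define PTE_READABLE  (1u << 5)
#define PTE_WRITEABLE (1u << 6)

int main(void)
{
        /* a readable, writeable, snooped system page */
        uint32_t in = VM_PAGE_VALID | VM_PAGE_READABLE | VM_PAGE_WRITEABLE |
                      VM_PAGE_SYSTEM | VM_PAGE_SNOOPED;
        uint32_t hw = 0;

        hw |= (in & VM_PAGE_VALID) ? PTE_VALID : 0;
        hw |= (in & VM_PAGE_READABLE) ? PTE_READABLE : 0;
        hw |= (in & VM_PAGE_WRITEABLE) ? PTE_WRITEABLE : 0;
        if (in & VM_PAGE_SYSTEM) {
                hw |= PTE_SYSTEM;
                hw |= (in & VM_PAGE_SNOOPED) ? PTE_SNOOPED : 0;
        }
        printf("userspace 0x%x -> hw 0x%x\n", in, hw);
        return 0;
}
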
+
+/**
+ * radeon_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+                                   struct radeon_vm *vm)
+{
+       static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+       uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+       uint64_t last_pde = ~0, last_pt = ~0;
+       unsigned count = 0, pt_idx, ndw;
+       struct radeon_ib ib;
+       int r;
+
+       /* padding, etc. */
+       ndw = 64;
+
+       /* assume the worst case */
+       ndw += vm->max_pde_used * 12;
+
+       /* update too big for an IB */
+       if (ndw > 0xfffff)
+               return -ENOMEM;
+
+       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+       if (r)
+               return r;
+       ib.length_dw = 0;
+
+       /* walk over the address space and update the page directory */
+       for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+               struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
+               uint64_t pde, pt;
+
+               if (bo == NULL)
+                       continue;
+
+               pt = radeon_bo_gpu_offset(bo);
+               if (vm->page_tables[pt_idx].addr == pt)
+                       continue;
+               vm->page_tables[pt_idx].addr = pt;
+
+               pde = pd_addr + pt_idx * 8;
+               if (((last_pde + 8 * count) != pde) ||
+                   ((last_pt + incr * count) != pt)) {
+
+                       if (count) {
+                               radeon_asic_vm_set_page(rdev, &ib, last_pde,
+                                                       last_pt, count, incr,
+                                                       R600_PTE_VALID);
+                       }
+
+                       count = 1;
+                       last_pde = pde;
+                       last_pt = pt;
+               } else {
+                       ++count;
+               }
+       }
+
+       if (count)
+               radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
+                                       incr, R600_PTE_VALID);
+
+       if (ib.length_dw != 0) {
+               radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+               r = radeon_ib_schedule(rdev, &ib, NULL);
+               if (r) {
+                       radeon_ib_free(rdev, &ib);
+                       return r;
+               }
+               radeon_fence_unref(&vm->fence);
+               vm->fence = radeon_fence_ref(ib.fence);
+               radeon_fence_unref(&vm->last_flush);
+       }
+       radeon_ib_free(rdev, &ib);
+
+       return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to fill with the page table update commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+                                 struct radeon_vm *vm,
+                                 struct radeon_ib *ib,
+                                 uint64_t start, uint64_t end,
+                                 uint64_t dst, uint32_t flags)
+{
+       static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+       uint64_t last_pte = ~0, last_dst = ~0;
+       unsigned count = 0;
+       uint64_t addr;
+
+       start = start / RADEON_GPU_PAGE_SIZE;
+       end = end / RADEON_GPU_PAGE_SIZE;
+
+       /* walk over the address space and update the page tables */
+       for (addr = start; addr < end; ) {
+               uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+               unsigned nptes;
+               uint64_t pte;
+
+               if ((addr & ~mask) == (end & ~mask))
+                       nptes = end - addr;
+               else
+                       nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+               pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
+               pte += (addr & mask) * 8;
+
+               if ((last_pte + 8 * count) != pte) {
+
+                       if (count) {
+                               radeon_asic_vm_set_page(rdev, ib, last_pte,
+                                                       last_dst, count,
+                                                       RADEON_GPU_PAGE_SIZE,
+                                                       flags);
+                       }
+
+                       count = nptes;
+                       last_pte = pte;
+                       last_dst = dst;
+               } else {
+                       count += nptes;
+               }
+
+               addr += nptes;
+               dst += nptes * RADEON_GPU_PAGE_SIZE;
+       }
+
+       if (count) {
+               radeon_asic_vm_set_page(rdev, ib, last_pte,
+                                       last_dst, count,
+                                       RADEON_GPU_PAGE_SIZE, flags);
+       }
+}
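
Both the PDE loop in radeon_vm_update_page_directory and this PTE walk coalesce runs of consecutive entries pointing at consecutive destinations into a single set_page call, which keeps the IB small. A standalone model of that run-length batching; the addresses and the emit function are stand-ins, not driver API:

#include <stdio.h>
#include <stdint.h>

static void emit_set_page(uint64_t pe, uint64_t dst, unsigned count)
{
        /* stand-in for radeon_asic_vm_set_page() */
        printf("set_page: pe=0x%llx dst=0x%llx count=%u\n",
               (unsigned long long)pe, (unsigned long long)dst, count);
}

int main(void)
{
        /* PTE slot address and destination for each page to map (assumed) */
        uint64_t pte[] = { 0x1000, 0x1008, 0x1010, 0x4000, 0x4008 };
        uint64_t dst[] = { 0xa000, 0xb000, 0xc000, 0xd000, 0xe000 };
        uint64_t last_pte = ~0ULL, last_dst = ~0ULL;
        unsigned count = 0, i;

        for (i = 0; i < 5; ++i) {
                /* contiguous with the pending run (8 bytes per PTE,
                 * one GPU page per destination)? then extend it */
                if (last_pte + 8 * count == pte[i] &&
                    last_dst + 0x1000 * count == dst[i]) {
                        ++count;
                        continue;
                }
                if (count)
                        emit_set_page(last_pte, last_dst, count);
                count = 1;
                last_pte = pte[i];
                last_dst = dst[i];
        }
        if (count)
                emit_set_page(last_pte, last_dst, count);
        return 0;
}

This emits two batched calls (counts 3 and 2) instead of five single-entry updates.
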
+
+/**
+ * radeon_vm_bo_update - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and the vm mutex must be locked!
+ */
+int radeon_vm_bo_update(struct radeon_device *rdev,
+                       struct radeon_vm *vm,
+                       struct radeon_bo *bo,
+                       struct ttm_mem_reg *mem)
+{
+       struct radeon_ib ib;
+       struct radeon_bo_va *bo_va;
+       unsigned nptes, ndw;
+       uint64_t addr;
+       int r;
+
+       bo_va = radeon_vm_bo_find(vm, bo);
+       if (bo_va == NULL) {
+               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+               return -EINVAL;
+       }
+
+       if (!bo_va->soffset) {
+               dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
+                       bo, vm);
+               return -EINVAL;
+       }
+
+       if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+               return 0;
+
+       bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+       bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+       if (mem) {
+               addr = mem->start << PAGE_SHIFT;
+               if (mem->mem_type != TTM_PL_SYSTEM) {
+                       bo_va->flags |= RADEON_VM_PAGE_VALID;
+                       bo_va->valid = true;
+               }
+               if (mem->mem_type == TTM_PL_TT) {
+                       bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+               } else {
+                       addr += rdev->vm_manager.vram_base_offset;
+               }
+       } else {
+               addr = 0;
+               bo_va->valid = false;
+       }
+
+       trace_radeon_vm_bo_update(bo_va);
+
+       nptes = radeon_bo_ngpu_pages(bo);
+
+       /* padding, etc. */
+       ndw = 64;
+
+       if (RADEON_VM_BLOCK_SIZE > 11)
+               /* reserve space for one header for every 2k dwords */
+               ndw += (nptes >> 11) * 4;
+       else
+               /* reserve space for one header for
+                  every (1 << BLOCK_SIZE) entries */
+               ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+
+       /* reserve space for pte addresses */
+       ndw += nptes * 2;
+
+       /* update too big for an IB */
+       if (ndw > 0xfffff)
+               return -ENOMEM;
+
+       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+       if (r)
+               return r;
+       ib.length_dw = 0;
+
+       radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
+                             addr, radeon_vm_page_flags(bo_va->flags));
+
+       radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               return r;
+       }
+       radeon_fence_unref(&vm->fence);
+       vm->fence = radeon_fence_ref(ib.fence);
+       radeon_ib_free(rdev, &ib);
+       radeon_fence_unref(&vm->last_flush);
+
+       return 0;
+}
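
The ndw budgeting above is easy to check by hand. A standalone sketch under assumed values (4 MiB BO, 4 KiB GPU pages, RADEON_VM_BLOCK_SIZE of 9, so the else branch applies):

#include <stdio.h>

#define RADEON_VM_BLOCK_SIZE 9

int main(void)
{
        unsigned nptes = 1024;          /* assumed: 4 MiB BO, 4 KiB GPU pages */
        unsigned ndw = 64;              /* padding, etc. */

        ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;     /* one header per PT block */
        ndw += nptes * 2;               /* one 64-bit address per PTE */

        /* 2120 dwords here, well under the 0xfffff cap checked above */
        printf("%u dwords (%u bytes) for the page table update IB\n",
               ndw, ndw * 4);
        return 0;
}
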
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
+ * Returns 0 for success.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+                    struct radeon_bo_va *bo_va)
+{
+       int r = 0;
+
+       mutex_lock(&bo_va->vm->mutex);
+       if (bo_va->soffset)
+               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+
+       list_del(&bo_va->vm_list);
+       mutex_unlock(&bo_va->vm->mutex);
+       list_del(&bo_va->bo_list);
+
+       kfree(bo_va);
+       return r;
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+                            struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       list_for_each_entry(bo_va, &bo->va, bo_list) {
+               bo_va->valid = false;
+       }
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       unsigned pd_size, pd_entries, pts_size;
+       int r;
+
+       vm->id = 0;
+       vm->fence = NULL;
+       vm->last_flush = NULL;
+       vm->last_id_use = NULL;
+       mutex_init(&vm->mutex);
+       INIT_LIST_HEAD(&vm->va);
+
+       pd_size = radeon_vm_directory_size(rdev);
+       pd_entries = radeon_vm_num_pdes(rdev);
+
+       /* allocate page table array */
+       pts_size = pd_entries * sizeof(struct radeon_vm_pt);
+       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+       if (vm->page_tables == NULL) {
+               DRM_ERROR("Cannot allocate memory for page table array\n");
+               return -ENOMEM;
+       }
+
+       r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
+                            RADEON_GEM_DOMAIN_VRAM, NULL,
+                            &vm->page_directory);
+       if (r)
+               return r;
+
+       r = radeon_vm_clear_bo(rdev, vm->page_directory);
+       if (r) {
+               radeon_bo_unref(&vm->page_directory);
+               vm->page_directory = NULL;
+               return r;
+       }
+
+       return 0;
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int i, r;
+
+       if (!list_empty(&vm->va)) {
+               dev_err(rdev->dev, "still active bo inside vm\n");
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+               list_del_init(&bo_va->vm_list);
+               r = radeon_bo_reserve(bo_va->bo, false);
+               if (!r) {
+                       list_del_init(&bo_va->bo_list);
+                       radeon_bo_unreserve(bo_va->bo);
+                       kfree(bo_va);
+               }
+       }
+
+       for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+               radeon_bo_unref(&vm->page_tables[i].bo);
+       kfree(vm->page_tables);
+
+       radeon_bo_unref(&vm->page_directory);
+
+       radeon_fence_unref(&vm->fence);
+       radeon_fence_unref(&vm->last_flush);
+       radeon_fence_unref(&vm->last_id_use);
+
+       mutex_destroy(&vm->mutex);
+}
index 1cf18b4a39ec1ca4403393664b7aba00c9a9bd5d..aefa2f6afa3ba3ccba6b86826db289fa9b21a5d4 100644 (file)
@@ -510,6 +510,7 @@ typedef struct {
 #define DRM_RADEON_GEM_GET_TILING      0x29
 #define DRM_RADEON_GEM_BUSY            0x2a
 #define DRM_RADEON_GEM_VA              0x2b
+#define DRM_RADEON_GEM_OP              0x2c
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -552,6 +553,7 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_GET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 #define DRM_IOCTL_RADEON_GEM_VA                DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
+#define DRM_IOCTL_RADEON_GEM_OP                DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
 
 typedef struct drm_radeon_init {
        enum {
@@ -884,6 +886,16 @@ struct drm_radeon_gem_pwrite {
        uint64_t data_ptr;
 };
 
+/* Sets or returns a value associated with a buffer. */
+struct drm_radeon_gem_op {
+       uint32_t        handle; /* buffer */
+       uint32_t        op;     /* RADEON_GEM_OP_* */
+       uint64_t        value;  /* input or return value */
+};
+
+#define RADEON_GEM_OP_GET_INITIAL_DOMAIN       0
+#define RADEON_GEM_OP_SET_INITIAL_DOMAIN       1
+
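
From userspace, the new ioctl is a straightforward get/set on the handle. A hedged sketch of querying a buffer's initial domain; "fd" (an open radeon DRM device) and "handle" (a valid GEM handle) are assumptions, and it presumes headers that already carry these new definitions:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static int radeon_gem_get_initial_domain(int fd, uint32_t handle,
                                         uint64_t *domain)
{
        struct drm_radeon_gem_op op;

        memset(&op, 0, sizeof(op));
        op.handle = handle;
        op.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;
        if (ioctl(fd, DRM_IOCTL_RADEON_GEM_OP, &op))
                return -1;
        *domain = op.value;     /* e.g. RADEON_GEM_DOMAIN_VRAM */
        return 0;
}

A real client would typically issue this through libdrm's drmIoctl() so interrupted calls are retried.
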
 #define RADEON_VA_MAP                  1
 #define RADEON_VA_UNMAP                        2
 
@@ -992,6 +1004,9 @@ struct drm_radeon_cs {
 #define RADEON_INFO_VCE_FW_VERSION     0x1b
 /* version of VCE feedback */
 #define RADEON_INFO_VCE_FB_VERSION     0x1c
+#define RADEON_INFO_NUM_BYTES_MOVED    0x1d
+#define RADEON_INFO_VRAM_USAGE         0x1e
+#define RADEON_INFO_GTT_USAGE          0x1f
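+
The new statistics are read through the existing INFO ioctl, where the value field carries a pointer the kernel writes through. A hedged sketch, assuming "fd" is an open radeon DRM device and updated headers are available:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static int radeon_query_bytes_moved(int fd, uint64_t *bytes_moved)
{
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_NUM_BYTES_MOVED;
        info.value = (uint64_t)(uintptr_t)bytes_moved;  /* kernel writes here */
        return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}

The same pattern with RADEON_INFO_VRAM_USAGE or RADEON_INFO_GTT_USAGE returns the current per-domain usage.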
 
 
 struct drm_radeon_info {