drm/nouveau/mmu: switch to device pri macros
author: Ben Skeggs <bskeggs@redhat.com>
Thu, 20 Aug 2015 04:54:09 +0000 (14:54 +1000)
committer: Ben Skeggs <bskeggs@redhat.com>
Fri, 28 Aug 2015 02:40:16 +0000 (12:40 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c

index 982f7c7079343d72fa86a2fb41a236b068788635..3551b55a5646cf4a1dc15d6a8d86c9054edc4dee 100644 (file)
@@ -154,7 +154,8 @@ static void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
        struct nvkm_mmu *mmu = (void *)vm->mmu;
-       struct nvkm_bar *bar = nvkm_bar(mmu);
+       struct nvkm_device *device = mmu->subdev.device;
+       struct nvkm_bar *bar = device->bar;
        struct nvkm_vm_pgd *vpgd;
        u32 type;
 
@@ -171,16 +172,16 @@ gf100_vm_flush(struct nvkm_vm *vm)
                 */
                if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
                        nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
-                                nv_rd32(mmu, 0x100c80), type);
+                                nvkm_rd32(device, 0x100c80), type);
                }
 
-               nv_wr32(mmu, 0x100cb8, vpgd->obj->addr >> 8);
-               nv_wr32(mmu, 0x100cbc, 0x80000000 | type);
+               nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+               nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
 
                /* wait for flush to be queued? */
                if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
                        nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
-                                nv_rd32(mmu, 0x100c80), type);
+                                nvkm_rd32(device, 0x100c80), type);
                }
        }
        mutex_unlock(&nv_subdev(mmu)->mutex);
index 17b2b3979da43b4a6452f87ff27eacb71eb3a721..609c6a69b60aade80b483ff37de77d5bb066eb2e 100644 (file)
@@ -65,14 +65,15 @@ static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
        struct nv04_mmu *mmu = (void *)vm->mmu;
+       struct nvkm_device *device = mmu->base.subdev.device;
 
        mutex_lock(&nv_subdev(mmu)->mutex);
-       nv_wr32(mmu, 0x100810, 0x00000022);
+       nvkm_wr32(device, 0x100810, 0x00000022);
        if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020)) {
                nv_warn(mmu, "flush timeout, 0x%08x\n",
-                       nv_rd32(mmu, 0x100810));
+                       nvkm_rd32(device, 0x100810));
        }
-       nv_wr32(mmu, 0x100810, 0x00000000);
+       nvkm_wr32(device, 0x100810, 0x00000000);
        mutex_unlock(&nv_subdev(mmu)->mutex);
 }
 
@@ -131,6 +132,7 @@ static int
 nv41_mmu_init(struct nvkm_object *object)
 {
        struct nv04_mmu *mmu = (void *)object;
+       struct nvkm_device *device = mmu->base.subdev.device;
        struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
        int ret;
 
@@ -138,9 +140,9 @@ nv41_mmu_init(struct nvkm_object *object)
        if (ret)
                return ret;
 
-       nv_wr32(mmu, 0x100800, dma->addr | 0x00000002);
-       nv_mask(mmu, 0x10008c, 0x00000100, 0x00000100);
-       nv_wr32(mmu, 0x100820, 0x00000000);
+       nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
+       nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+       nvkm_wr32(device, 0x100820, 0x00000000);
        return 0;
 }
 
index 860654fee3876426da73c7fe1c900cf4c0fca107..371f627e17dbdd5e569e9167c53cf393c3ddb358 100644 (file)
@@ -140,11 +140,12 @@ static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
        struct nv04_mmu *mmu = (void *)vm->mmu;
-       nv_wr32(mmu, 0x100814, mmu->base.limit - NV44_GART_PAGE);
-       nv_wr32(mmu, 0x100808, 0x00000020);
+       struct nvkm_device *device = mmu->base.subdev.device;
+       nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+       nvkm_wr32(device, 0x100808, 0x00000020);
        if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
-               nv_error(mmu, "timeout: 0x%08x\n", nv_rd32(mmu, 0x100808));
-       nv_wr32(mmu, 0x100808, 0x00000000);
+               nv_error(mmu, "timeout: 0x%08x\n", nvkm_rd32(device, 0x100808));
+       nvkm_wr32(device, 0x100808, 0x00000000);
 }
 
 /*******************************************************************************
@@ -208,6 +209,7 @@ static int
 nv44_mmu_init(struct nvkm_object *object)
 {
        struct nv04_mmu *mmu = (void *)object;
+       struct nvkm_device *device = mmu->base.subdev.device;
        struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
        u32 addr;
        int ret;
@@ -220,17 +222,17 @@ nv44_mmu_init(struct nvkm_object *object)
         * allocated on 512KiB alignment, and not exceed a total size
         * of 512KiB for this to work correctly
         */
-       addr  = nv_rd32(mmu, 0x10020c);
+       addr  = nvkm_rd32(device, 0x10020c);
        addr -= ((gart->addr >> 19) + 1) << 19;
 
-       nv_wr32(mmu, 0x100850, 0x80000000);
-       nv_wr32(mmu, 0x100818, mmu->null);
-       nv_wr32(mmu, 0x100804, NV44_GART_SIZE);
-       nv_wr32(mmu, 0x100850, 0x00008000);
-       nv_mask(mmu, 0x10008c, 0x00000200, 0x00000200);
-       nv_wr32(mmu, 0x100820, 0x00000000);
-       nv_wr32(mmu, 0x10082c, 0x00000001);
-       nv_wr32(mmu, 0x100800, addr | 0x00000010);
+       nvkm_wr32(device, 0x100850, 0x80000000);
+       nvkm_wr32(device, 0x100818, mmu->null);
+       nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+       nvkm_wr32(device, 0x100850, 0x00008000);
+       nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+       nvkm_wr32(device, 0x100820, 0x00000000);
+       nvkm_wr32(device, 0x10082c, 0x00000001);
+       nvkm_wr32(device, 0x100800, addr | 0x00000010);
        return 0;
 }
 
index 75c6a07e2dd54cffc8bac2972fe5848dec8fb867..1d7e1aa7e7d3d48c2711346a7ec2c92b551c4439 100644 (file)
@@ -146,7 +146,8 @@ static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
        struct nvkm_mmu *mmu = (void *)vm->mmu;
-       struct nvkm_bar *bar = nvkm_bar(mmu);
+       struct nvkm_device *device = mmu->subdev.device;
+       struct nvkm_bar *bar = device->bar;
        struct nvkm_engine *engine;
        int i, vme;
 
@@ -180,7 +181,7 @@ nv50_vm_flush(struct nvkm_vm *vm)
                        continue;
                }
 
-               nv_wr32(mmu, 0x100c80, (vme << 16) | 1);
+               nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
                if (!nv_wait(mmu, 0x100c80, 0x00000001, 0x00000000))
                        nv_error(mmu, "vm flush timeout: engine %d\n", vme);
        }