/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
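
/*
 * Instance state for the NV50-family BAR subdev.  Two apertures are
 * managed here: BAR1, which backs userspace mappings (umap), and BAR3,
 * which backs kernel mappings of GPU objects (kmap).  Each aperture
 * gets its own VM, both sharing the page directory in "pgd".
 */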
struct nv50_bar {
	struct nvkm_bar base;
	spinlock_t lock;
	struct nvkm_gpuobj *mem;
	struct nvkm_gpuobj *pad;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *bar1_vm;
	struct nvkm_gpuobj *bar1;
	struct nvkm_vm *bar3_vm;
	struct nvkm_gpuobj *bar3;
};
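
/* Map @mem into the kernel-visible BAR3 aperture. */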
static int
nv50_bar_kmap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}
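
/* Map @mem into the BAR1 aperture, for use by userspace. */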
static int
nv50_bar_umap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}
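
/* Release a mapping made by either of the functions above. */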
static void
nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}
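
/*
 * Trigger a write flush via 0x00330c, then poll the same register
 * (for up to 2ms) until the busy bit clears.
 */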
static void
nv50_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x00330c, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->lock, flags);
}
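
/* G84 and later chipsets expose the flush interface at 0x070000 instead. */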
void
g84_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x070000, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->lock, flags);
}
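
/*
 * Construction: carve a heap out of instance memory, allocate a page
 * directory shared by both BAR VMs, then build a VM and a DMA object
 * for each of BAR3 and BAR1.
 */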
static int
nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nvkm_object *heap;
	struct nvkm_vm *vm;
	struct nv50_bar *bar;
	u64 start, limit;
	int ret;

	ret = nvkm_bar_create(parent, engine, oclass, &bar);
	*pobject = nv_object(bar);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x20000, 0,
			      NVOBJ_FLAG_HEAP, &bar->mem);
	heap = nv_object(bar->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
			      0, 0, &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 0x4000, 0, 0, &bar->pgd);
	if (ret)
		return ret;
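
	/* BAR3: pre-allocate the single page table covering the whole
	 * aperture, so kernel mappings never fail for lack of one.
	 */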
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nvkm_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      ((limit-- - start) >> 12) * 8, 0x1000,
			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar3);
	if (ret)
		return ret;

	nv_wo32(bar->bar3, 0x00, 0x7fc00000);
	nv_wo32(bar->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar3, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
				 upper_32_bits(start));
	nv_wo32(bar->bar3, 0x10, 0x00000000);
	nv_wo32(bar->bar3, 0x14, 0x00000000);
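
	/* BAR1: page tables for this VM are allocated on demand. */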
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nvkm_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar1);
	if (ret)
		return ret;

	nv_wo32(bar->bar1, 0x00, 0x7fc00000);
	nv_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				 upper_32_bits(start));
	nv_wo32(bar->bar1, 0x10, 0x00000000);
	nv_wo32(bar->bar1, 0x14, 0x00000000);

	bar->base.alloc = nvkm_bar_alloc;
	bar->base.kmap = nv50_bar_kmap;
	bar->base.umap = nv50_bar_umap;
	bar->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		bar->base.flush = nv50_bar_flush;
	else
		bar->base.flush = g84_bar_flush;
	spin_lock_init(&bar->lock);
	return 0;
}
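
/* Destruction drops references in roughly the reverse order of creation. */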
static void
nv50_bar_dtor(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	nvkm_gpuobj_ref(NULL, &bar->bar1);
	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->bar3);
	if (bar->bar3_vm) {
		nvkm_gpuobj_ref(NULL, &bar->bar3_vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
	}
	nvkm_gpuobj_ref(NULL, &bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->pad);
	nvkm_gpuobj_ref(NULL, &bar->mem);
	nvkm_bar_destroy(&bar->base);
}
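
/*
 * Initialisation: cycle bit 8 of 0x000200, kick 0x100c80 and wait for
 * it to go idle, then program 0x001704 with the instance block address
 * and 0x001708/0x00170c with the BAR1/BAR3 DMA objects.
 */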
static int
nv50_bar_init(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	struct nvkm_device *device = bar->base.subdev.device;
	int ret, i;

	ret = nvkm_bar_init(&bar->base);
	if (ret)
		return ret;

	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x100c80, 0x00060001);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
			break;
	) < 0)
		return -EBUSY;

	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
	for (i = 0; i < 8; i++)
		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
	return 0;
}
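
/* Nothing beyond the common subdev fini is required on suspend. */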
static int
nv50_bar_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_bar *bar = (void *)object;
	return nvkm_bar_fini(&bar->base, suspend);
}
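
/* Object class hooking the NV50 BAR implementation into the nvkm
 * object model.
 */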
struct nvkm_oclass *
nv50_bar_oclass = &(struct nvkm_oclass) {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};