/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

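/* RAMFC layout table for nv04.  Each entry describes one saved context
 * field: its width in bits, its shift and byte offset within the channel's
 * RAMFC entry, and the shift and address of the PFIFO register it shadows
 * (field ordering assumed from how nv04_fifo_chan_fini() consumes it).
 */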
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

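/* Attaching an object inserts a RAMHT entry pairing its handle with a
 * context word: instance address (or a non-zero dummy) in the low bits,
 * engine routing at bits 16-17, channel id from bit 24, valid bit 31.
 */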
int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(imem->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}

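/* A DMA channel owns a 32-byte RAMFC entry; creation seeds DMA_PUT/GET
 * with the requested pushbuffer offset, points DMA_INSTANCE at the
 * pushbuffer object, and programs the default fetch parameters.
 */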
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct ramfc_desc *c = fifo->ramfc_desc;

	/* zero every field of the channel's RAMFC entry */
	nvkm_kmap(imem->ramfc);
	do {
		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(imem->ramfc);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

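/* Channel suspend: if this channel is resident in CACHE1, the CACHE1
 * registers are saved back into its RAMFC entry via the ramfc_desc
 * table before the channel's DMA mode is disabled.
 */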
int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

static int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

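/* Counterpart to nv04_fifo_pause(): re-enable the puller and cache
 * reassignment, then drop the lock taken (and flags saved) by pause.
 */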
void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

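/* Decode the DMA pusher error code from the top three bits (29-31) of
 * the DMA_STATE word.
 */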
static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

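/* Software method handling: subchannel and method are decoded from the
 * CACHE1 address word (subc in bits 13-15, mthd in the low 13 bits).
 * Method 0x0000 binds an object to a subchannel; other methods are
 * forwarded to the SW object bound there, if any.  Returns true if the
 * method was consumed by the driver.
 */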
static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}

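/* CACHE_ERROR recovery: try to execute the offending entry as a software
 * method; if that fails, report it and skip it by bumping CACHE1 GET by
 * one entry before restarting the puller.
 */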
static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			   data);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

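/* DMA_PUSHER recovery: report the pusher state, then skip the bad data
 * by advancing GET (and the NV50 IB pointers) to PUT before clearing
 * DMA_STATE and re-enabling the pusher.
 */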
static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	const char *client_name;

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, client_name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

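/* Top-level PFIFO interrupt handler: cache reassignment is held off
 * while each asserted source is serviced; any source left unhandled is
 * reported once and then masked to prevent an interrupt storm.
 */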
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

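/* PFIFO engine constructor: 16 DMA channels (chid 0-15), with the RAMFC
 * layout table above describing how their context is saved/restored.
 */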
static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_fifo_destroy(&fifo->base);
}

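/* PFIFO bring-up: program the RAMHT/RAMRO/RAMFC base addresses, unmask
 * all interrupts, and enable the pusher, puller and context switching.
 */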
int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};