2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
28 #include <core/client.h>
29 #include <core/gpuobj.h>
30 #include <core/enum.h>
31 #include <core/handle.h>
32 #include <core/ramht.h>
33 #include <engine/dmaobj.h>
34 #include <subdev/bios.h>
35 #include <subdev/bios/dcb.h>
36 #include <subdev/bios/disp.h>
37 #include <subdev/bios/init.h>
38 #include <subdev/bios/pll.h>
39 #include <subdev/devinit.h>
40 #include <subdev/fb.h>
41 #include <subdev/timer.h>
43 #include <nvif/class.h>
44 #include <nvif/event.h>
45 #include <nvif/unpack.h>
47 /*******************************************************************************
48 * EVO channel base class
49 ******************************************************************************/
/* Common constructor for every EVO channel type (DMA and PIO).
 * Reserves the per-display channel-id slot, creates the namedb object,
 * and installs the implementation's attach/detach hooks.
 * NOTE(review): this listing is missing several lines (return on busy,
 * chan allocation, error checks) - comments cover only what is visible.
 */
52 nv50_disp_chan_create_(struct nvkm_object *parent,
53 struct nvkm_object *engine,
54 struct nvkm_oclass *oclass, int head,
55 int length, void **pobject)
57 const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
58 struct nv50_disp_base *base = (void *)parent;
59 struct nv50_disp_chan *chan;
/* channel id = class base id + head index */
60 int chid = impl->chid + head;
/* base->chan is a bitmask of channel ids already in use */
63 if (base->chan & (1 << chid))
65 base->chan |= (1 << chid);
/* channel objects may reference DMA objects (pushbuf etc.) */
67 ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
68 (1ULL << NVDEV_ENGINE_DMAOBJ),
/* route object attach/detach through the channel implementation so
 * DMA objects can be inserted into/removed from the display RAMHT */
75 nv_parent(chan)->object_attach = impl->attach;
76 nv_parent(chan)->object_detach = impl->detach;
/* Releases the channel-id slot claimed in nv50_disp_chan_create_()
 * and tears down the underlying namedb object. */
81 nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
83 struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
/* clear this channel's bit in the in-use mask */
84 base->chan &= ~(1 << chan->chid);
85 nvkm_namedb_destroy(&chan->base);
/* Event teardown: mask off the per-channel bit in 0x610028 (interrupt
 * enable - presumably; see init counterpart) and ack any pending
 * interrupt for this index in 0x610020. */
89 nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
91 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
92 struct nvkm_device *device = disp->base.engine.subdev.device;
93 nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index)
94 nvkm_wr32(device, 0x610020, 0x00000001 << index);
/* Event arm: ack any stale pending interrupt for this channel index,
 * then set its enable bit in 0x610028 (mirror of uevent_fini). */
98 nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
100 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
101 struct nvkm_device *device = disp->base.engine.subdev.device;
102 nvkm_wr32(device, 0x610020, 0x00000001 << index);
103 nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
/* Delivers a (payload-less) channel-idle notification to any listener
 * registered on disp->uevent for the given channel id. */
107 nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
109 struct nvif_notify_uevent_rep {
112 nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
/* Notify constructor: validates the (empty) request payload and binds
 * the notify to this DMA channel's id so only its events are seen. */
116 nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
117 struct nvkm_notify *notify)
119 struct nv50_disp_dmac *dmac = (void *)object;
121 struct nvif_notify_uevent_req none;
/* request carries no versioned data; accept only the "none" form */
125 if (nvif_unvers(args->none)) {
126 notify->size = sizeof(struct nvif_notify_uevent_rep);
/* filter events by this channel's id */
128 notify->index = dmac->base.chid;
/* Event-function table tying the uevent ctor/init/fini above into the
 * generic nvkm_event machinery. */
135 const struct nvkm_event_func
136 nv50_disp_chan_uevent = {
137 .ctor = nv50_disp_chan_uevent_ctor,
138 .init = nv50_disp_chan_uevent_init,
139 .fini = nv50_disp_chan_uevent_fini,
/* Maps a notification type requested by the client onto the display's
 * event source; only the channel-uevent type is handled here. */
143 nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
144 struct nvkm_event **pevent)
146 struct nv50_disp *disp = (void *)object->engine;
148 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
149 *pevent = &disp->uevent;
/* Returns the BAR0 physical address of this channel's 4KiB user
 * register window (0x640000 + chid * 0x1000) for client mmap. */
158 nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
160 struct nv50_disp_chan *chan = (void *)object;
161 *addr = nv_device_resource_start(nv_device(object), 0) +
162 0x640000 + (chan->chid * 0x1000);
/* Reads a register from the channel's user window (same layout as
 * nv50_disp_chan_map exposes to userspace). */
168 nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
170 struct nv50_disp_chan *chan = (void *)object;
171 struct nvkm_device *device = object->engine->subdev.device;
172 return nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
/* Writes a register in the channel's user window (mirror of rd32). */
176 nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
178 struct nv50_disp_chan *chan = (void *)object;
179 struct nvkm_device *device = object->engine->subdev.device;
180 nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
183 /*******************************************************************************
184 * EVO DMA channel base class
185 ******************************************************************************/
/* Inserts a (DMA) object into the display RAMHT so the EVO channel can
 * reference it by handle.  The hash-table data word packs the channel
 * id and the object's instance offset. */
188 nv50_disp_dmac_object_attach(struct nvkm_object *parent,
189 struct nvkm_object *object, u32 name)
191 struct nv50_disp_base *base = (void *)parent->parent;
192 struct nv50_disp_chan *chan = (void *)parent;
193 u32 addr = nv_gpuobj(object)->node->offset;
194 u32 chid = chan->chid;
/* data layout: chid in bits 31:28, instance offset shifted by 10, chid low */
195 u32 data = (chid << 28) | (addr << 10) | chid;
196 return nvkm_ramht_insert(base->ramht, NULL, chid, 0, name, data);
/* Removes a previously attached object from the display RAMHT using
 * the cookie returned by nvkm_ramht_insert(). */
200 nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
202 struct nv50_disp_base *base = (void *)parent->parent;
203 nvkm_ramht_remove(base->ramht, cookie);
/* Constructor for DMA-driven EVO channels: creates the base channel,
 * looks up the client's pushbuffer DMA object, validates it (must be
 * exactly 4KiB) and encodes its target/offset into dmac->push for the
 * hardware PUSHBUF register.
 * NOTE(review): listing is missing the handle-lookup error check, the
 * NOSNOOP fall-through/default cases and the trailing cleanup. */
207 nv50_disp_dmac_create_(struct nvkm_object *parent,
208 struct nvkm_object *engine,
209 struct nvkm_oclass *oclass, u64 pushbuf, int head,
210 int length, void **pobject)
212 struct nvkm_client *client = nvkm_client(parent);
213 struct nvkm_handle *handle;
214 struct nvkm_dmaobj *dmaobj;
215 struct nv50_disp_dmac *dmac;
218 ret = nv50_disp_chan_create_(parent, engine, oclass, head,
/* resolve the client handle naming the pushbuffer DMA object */
224 handle = nvkm_client_search(client, pushbuf);
227 dmaobj = (void *)handle->object;
229 switch (nv_mclass(dmaobj)) {
/* pushbuffer must be exactly 0x1000 bytes */
232 if (dmaobj->limit - dmaobj->start != 0xfff)
235 switch (dmaobj->target) {
236 case NV_MEM_TARGET_VRAM:
/* bit0 set = valid; bits 1:0 encode target, offset stored >> 8 */
237 dmac->push = 0x00000001 | dmaobj->start >> 8;
239 case NV_MEM_TARGET_PCI_NOSNOOP:
240 dmac->push = 0x00000003 | dmaobj->start >> 8;
/* Destructor for DMA channels: delegates to the common channel
 * teardown (frees the chid slot, destroys the namedb). */
254 nv50_disp_dmac_dtor(struct nvkm_object *object)
256 struct nv50_disp_dmac *dmac = (void *)object;
257 nv50_disp_chan_destroy(&dmac->base);
/* Brings a DMA channel up: enables its error interrupt, programs the
 * pushbuffer/DMA registers, kicks the channel and waits (2s timeout)
 * for the hardware to report it has left the "busy" state. */
261 nv50_disp_dmac_init(struct nvkm_object *object)
263 struct nv50_disp *disp = (void *)object->engine;
264 struct nv50_disp_dmac *dmac = (void *)object;
265 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
266 struct nvkm_device *device = subdev->device;
267 int chid = dmac->base.chid;
270 ret = nv50_disp_chan_init(&dmac->base);
274 /* enable error reporting */
275 nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
277 /* initialise channel for dma command submission */
278 nvkm_wr32(device, 0x610204 + (chid * 0x0010), dmac->push);
279 nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
280 nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
281 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
/* reset the user-visible PUT pointer before starting the channel */
282 nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
283 nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
285 /* wait for it to go inactive */
286 if (nvkm_msec(device, 2000,
287 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
290 nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
291 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
/* Shuts a DMA channel down: deactivates it, waits (2s timeout) for the
 * hardware state machine to go idle, then masks its error/completion
 * interrupts before handing off to the common channel fini. */
299 nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
301 struct nv50_disp *disp = (void *)object->engine;
302 struct nv50_disp_dmac *dmac = (void *)object;
303 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
304 struct nvkm_device *device = subdev->device;
305 int chid = dmac->base.chid;
307 /* deactivate channel */
308 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
309 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
310 if (nvkm_msec(device, 2000,
311 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
314 nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
315 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
320 /* disable error reporting and completion notifications */
321 nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
323 return nv50_disp_chan_fini(&dmac->base, suspend);
326 /*******************************************************************************
327 * EVO master channel object
328 ******************************************************************************/
/* Debug helper: dumps one method list for a channel, printing each
 * method's shadow values at two register offsets ("next" at +0 and
 * "prev" at +c) so a pending-vs-committed diff is visible in the log. */
331 nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
332 const struct nv50_disp_mthd_list *list, int inst)
334 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
335 struct nvkm_device *device = subdev->device;
/* list terminates at an entry whose mthd is zero */
338 for (i = 0; list->data[i].mthd; i++) {
/* addr == 0 marks a method with no readable shadow register */
339 if (list->data[i].addr) {
340 u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
341 u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
/* instance-relative method offset (e.g. per-DAC/SOR stride) */
342 u32 mthd = list->data[i].mthd + (list->mthd * inst);
343 const char *name = list->data[i].name;
347 snprintf(mods, sizeof(mods), "-> %08x", next);
349 snprintf(mods, sizeof(mods), "%13c", ' ');
351 nvkm_printk_(subdev, debug, info,
352 "\t%04x: %08x %s%s%s\n",
353 mthd, prev, mods, name ? " // " : "",
/* Debug helper: walks all method lists of a channel description and
 * prints each list (once per instance) via nv50_disp_mthd_list().
 * Skipped entirely when the subdev debug level is below `debug`. */
360 nv50_disp_mthd_chan(struct nv50_disp *disp, int debug, int head,
361 const struct nv50_disp_mthd_chan *chan)
363 struct nvkm_object *object = nv_object(disp);
364 const struct nv50_disp_impl *impl = (void *)object->oclass;
365 const struct nv50_disp_mthd_list *list;
366 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
369 if (debug > nv_subdev(disp)->debug)
/* chan->data[] is terminated by a NULL mthd list pointer */
372 for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
373 u32 base = head * chan->addr;
/* one pass per hardware instance (e.g. DAC0..2), stride list->addr */
374 for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
375 const char *cname = chan->name;
376 const char *sname = "";
377 char cname_[16], sname_[16];
380 snprintf(cname_, sizeof(cname_), "%s %d",
/* only label the sub-instance when there is more than one */
385 if (chan->data[i].nr > 1) {
386 snprintf(sname_, sizeof(sname_), " - %s %d",
387 chan->data[i].name, j);
391 nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
392 nv50_disp_mthd_list(disp, debug, base, impl->mthd.prev,
/* Core (master) channel method tables: map EVO method offsets to the
 * MMIO shadow registers holding their current values.  Entries with
 * address 0x000000 have no readable shadow.  Used only for debug dumps
 * via nv50_disp_mthd_chan(). */
398 const struct nv50_disp_mthd_list
399 nv50_disp_core_mthd_base = {
403 { 0x0080, 0x000000 },
404 { 0x0084, 0x610bb8 },
405 { 0x0088, 0x610b9c },
406 { 0x008c, 0x000000 },
/* per-DAC methods (3 instances on nv50) */
411 static const struct nv50_disp_mthd_list
412 nv50_disp_core_mthd_dac = {
416 { 0x0400, 0x610b58 },
417 { 0x0404, 0x610bdc },
418 { 0x0420, 0x610828 },
/* per-SOR methods */
423 const struct nv50_disp_mthd_list
424 nv50_disp_core_mthd_sor = {
428 { 0x0600, 0x610b70 },
/* per-PIOR methods */
433 const struct nv50_disp_mthd_list
434 nv50_disp_core_mthd_pior = {
438 { 0x0700, 0x610b80 },
/* per-head (CRTC) methods */
443 static const struct nv50_disp_mthd_list
444 nv50_disp_core_mthd_head = {
448 { 0x0800, 0x610ad8 },
449 { 0x0804, 0x610ad0 },
450 { 0x0808, 0x610a48 },
451 { 0x080c, 0x610a78 },
452 { 0x0810, 0x610ac0 },
453 { 0x0814, 0x610af8 },
454 { 0x0818, 0x610b00 },
455 { 0x081c, 0x610ae8 },
456 { 0x0820, 0x610af0 },
457 { 0x0824, 0x610b08 },
458 { 0x0828, 0x610b10 },
459 { 0x082c, 0x610a68 },
460 { 0x0830, 0x610a60 },
461 { 0x0834, 0x000000 },
462 { 0x0838, 0x610a40 },
463 { 0x0840, 0x610a24 },
464 { 0x0844, 0x610a2c },
465 { 0x0848, 0x610aa8 },
466 { 0x084c, 0x610ab0 },
467 { 0x0860, 0x610a84 },
468 { 0x0864, 0x610a90 },
469 { 0x0868, 0x610b18 },
470 { 0x086c, 0x610b20 },
471 { 0x0870, 0x610ac8 },
472 { 0x0874, 0x610a38 },
473 { 0x0880, 0x610a58 },
474 { 0x0884, 0x610a9c },
475 { 0x08a0, 0x610a70 },
476 { 0x08a4, 0x610a50 },
477 { 0x08a8, 0x610ae0 },
478 { 0x08c0, 0x610b28 },
479 { 0x08c4, 0x610b30 },
480 { 0x08c8, 0x610b40 },
481 { 0x08d4, 0x610b38 },
482 { 0x08d8, 0x610b48 },
483 { 0x08dc, 0x610b50 },
484 { 0x0900, 0x610a18 },
485 { 0x0904, 0x610ab8 },
/* channel description: ties the per-unit lists above together with
 * their instance counts for the core channel */
490 static const struct nv50_disp_mthd_chan
491 nv50_disp_core_mthd_chan = {
495 { "Global", 1, &nv50_disp_core_mthd_base },
496 { "DAC", 3, &nv50_disp_core_mthd_dac },
497 { "SOR", 2, &nv50_disp_core_mthd_sor },
498 { "PIOR", 3, &nv50_disp_core_mthd_pior },
499 { "HEAD", 2, &nv50_disp_core_mthd_head },
/* Constructor for the EVO core (master) channel object: unpacks the
 * v0 ioctl args and creates the DMA channel on head 0. */
505 nv50_disp_core_ctor(struct nvkm_object *parent,
506 struct nvkm_object *engine,
507 struct nvkm_oclass *oclass, void *data, u32 size,
508 struct nvkm_object **pobject)
511 struct nv50_disp_core_channel_dma_v0 v0;
513 struct nv50_disp_dmac *mast;
516 nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
517 if (nvif_unpack(args->v0, 0, 0, false)) {
518 nvif_ioctl(parent, "create disp core channel dma vers %d "
520 args->v0.version, args->v0.pushbuf);
/* core channel always lives on head 0 */
524 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
525 0, sizeof(*mast), (void **)&mast);
526 *pobject = nv_object(mast);
/* Core-channel init: like nv50_disp_dmac_init() but for chid 0, with
 * extra magic writes that recover the channel from stuck states seen
 * on real hardware before (re)programming it. */
534 nv50_disp_core_init(struct nvkm_object *object)
536 struct nv50_disp *disp = (void *)object->engine;
537 struct nv50_disp_dmac *mast = (void *)object;
538 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
539 struct nvkm_device *device = subdev->device;
542 ret = nv50_disp_chan_init(&mast->base);
546 /* enable error reporting */
547 nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
549 /* attempt to unstick channel from some unknown state */
550 if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
551 nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
552 if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
553 nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
555 /* initialise channel for dma command submission */
556 nvkm_wr32(device, 0x610204, mast->push);
557 nvkm_wr32(device, 0x610208, 0x00010000);
558 nvkm_wr32(device, 0x61020c, 0x00000000);
559 nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
560 nvkm_wr32(device, 0x640000, 0x00000000);
561 nvkm_wr32(device, 0x610200, 0x01000013);
563 /* wait for it to go inactive */
564 if (nvkm_msec(device, 2000,
565 if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
568 nvkm_error(subdev, "core init: %08x\n",
569 nvkm_rd32(device, 0x610200));
/* Core-channel teardown: deactivate, wait (2s) for idle, then mask the
 * channel's interrupts before the common channel fini. */
577 nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
579 struct nv50_disp *disp = (void *)object->engine;
580 struct nv50_disp_dmac *mast = (void *)object;
581 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
582 struct nvkm_device *device = subdev->device;
584 /* deactivate channel */
585 nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
586 nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
587 if (nvkm_msec(device, 2000,
588 if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
591 nvkm_error(subdev, "core fini: %08x\n",
592 nvkm_rd32(device, 0x610200));
597 /* disable error reporting and completion notifications */
598 nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
600 return nv50_disp_chan_fini(&mast->base, suspend);
/* Object-function table for the core channel class; attach/detach go
 * through the RAMHT helpers since this is a DMA channel. */
603 struct nv50_disp_chan_impl
604 nv50_disp_core_ofuncs = {
605 .base.ctor = nv50_disp_core_ctor,
606 .base.dtor = nv50_disp_dmac_dtor,
607 .base.init = nv50_disp_core_init,
608 .base.fini = nv50_disp_core_fini,
609 .base.map = nv50_disp_chan_map,
610 .base.ntfy = nv50_disp_chan_ntfy,
611 .base.rd32 = nv50_disp_chan_rd32,
612 .base.wr32 = nv50_disp_chan_wr32,
614 .attach = nv50_disp_dmac_object_attach,
615 .detach = nv50_disp_dmac_object_detach,
618 /*******************************************************************************
619 * EVO sync channel objects
620 ******************************************************************************/
/* Base (sync) channel method tables: method-to-shadow-register maps
 * used for debug dumps; 0x000000 entries have no readable shadow. */
622 static const struct nv50_disp_mthd_list
623 nv50_disp_base_mthd_base = {
627 { 0x0080, 0x000000 },
628 { 0x0084, 0x0008c4 },
629 { 0x0088, 0x0008d0 },
630 { 0x008c, 0x0008dc },
631 { 0x0090, 0x0008e4 },
632 { 0x0094, 0x610884 },
633 { 0x00a0, 0x6108a0 },
634 { 0x00a4, 0x610878 },
635 { 0x00c0, 0x61086c },
636 { 0x00e0, 0x610858 },
637 { 0x00e4, 0x610860 },
638 { 0x00e8, 0x6108ac },
639 { 0x00ec, 0x6108b4 },
640 { 0x0100, 0x610894 },
641 { 0x0110, 0x6108bc },
642 { 0x0114, 0x61088c },
/* per-image methods (2 instances per head) */
647 const struct nv50_disp_mthd_list
648 nv50_disp_base_mthd_image = {
652 { 0x0800, 0x6108f0 },
653 { 0x0804, 0x6108fc },
654 { 0x0808, 0x61090c },
655 { 0x080c, 0x610914 },
656 { 0x0810, 0x610904 },
/* channel description tying the lists together */
661 static const struct nv50_disp_mthd_chan
662 nv50_disp_base_mthd_chan = {
666 { "Global", 1, &nv50_disp_base_mthd_base },
667 { "Image", 2, &nv50_disp_base_mthd_image },
/* Constructor for a base (sync) channel: unpacks the v0 args,
 * validates the head index, and creates the DMA channel on that head.
 * NOTE(review): the visible check is `head > disp->head.nr`; with a
 * 0-based head index `>=` would be expected - confirm against the
 * upstream source before relying on this. */
673 nv50_disp_base_ctor(struct nvkm_object *parent,
674 struct nvkm_object *engine,
675 struct nvkm_oclass *oclass, void *data, u32 size,
676 struct nvkm_object **pobject)
679 struct nv50_disp_base_channel_dma_v0 v0;
681 struct nv50_disp *disp = (void *)engine;
682 struct nv50_disp_dmac *dmac;
685 nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
686 if (nvif_unpack(args->v0, 0, 0, false)) {
687 nvif_ioctl(parent, "create disp base channel dma vers %d "
688 "pushbuf %016llx head %d\n",
689 args->v0.version, args->v0.pushbuf, args->v0.head);
690 if (args->v0.head > disp->head.nr)
695 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
696 args->v0.head, sizeof(*dmac),
698 *pobject = nv_object(dmac);
/* Object-function table for the base (sync) channel class. */
705 struct nv50_disp_chan_impl
706 nv50_disp_base_ofuncs = {
707 .base.ctor = nv50_disp_base_ctor,
708 .base.dtor = nv50_disp_dmac_dtor,
709 .base.init = nv50_disp_dmac_init,
710 .base.fini = nv50_disp_dmac_fini,
711 .base.ntfy = nv50_disp_chan_ntfy,
712 .base.map = nv50_disp_chan_map,
713 .base.rd32 = nv50_disp_chan_rd32,
714 .base.wr32 = nv50_disp_chan_wr32,
716 .attach = nv50_disp_dmac_object_attach,
717 .detach = nv50_disp_dmac_object_detach,
720 /*******************************************************************************
721 * EVO overlay channel objects
722 ******************************************************************************/
/* Overlay channel method tables: method-to-shadow-register map used
 * for debug dumps; 0x000000 entries have no readable shadow. */
724 const struct nv50_disp_mthd_list
725 nv50_disp_ovly_mthd_base = {
729 { 0x0080, 0x000000 },
730 { 0x0084, 0x0009a0 },
731 { 0x0088, 0x0009c0 },
732 { 0x008c, 0x0009c8 },
733 { 0x0090, 0x6109b4 },
734 { 0x0094, 0x610970 },
735 { 0x00a0, 0x610998 },
736 { 0x00a4, 0x610964 },
737 { 0x00c0, 0x610958 },
738 { 0x00e0, 0x6109a8 },
739 { 0x00e4, 0x6109d0 },
740 { 0x00e8, 0x6109d8 },
741 { 0x0100, 0x61094c },
742 { 0x0104, 0x610984 },
743 { 0x0108, 0x61098c },
744 { 0x0800, 0x6109f8 },
745 { 0x0808, 0x610a08 },
746 { 0x080c, 0x610a10 },
747 { 0x0810, 0x610a00 },
/* single global method list for the overlay channel */
752 static const struct nv50_disp_mthd_chan
753 nv50_disp_ovly_mthd_chan = {
757 { "Global", 1, &nv50_disp_ovly_mthd_base },
/* Constructor for an overlay DMA channel: unpacks the v0 args,
 * validates the head, and creates the DMA channel on that head.
 * NOTE(review): head check is `>` not `>=` - same caveat as base_ctor,
 * confirm against the upstream source. */
763 nv50_disp_ovly_ctor(struct nvkm_object *parent,
764 struct nvkm_object *engine,
765 struct nvkm_oclass *oclass, void *data, u32 size,
766 struct nvkm_object **pobject)
769 struct nv50_disp_overlay_channel_dma_v0 v0;
771 struct nv50_disp *disp = (void *)engine;
772 struct nv50_disp_dmac *dmac;
775 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
776 if (nvif_unpack(args->v0, 0, 0, false)) {
777 nvif_ioctl(parent, "create disp overlay channel dma vers %d "
778 "pushbuf %016llx head %d\n",
779 args->v0.version, args->v0.pushbuf, args->v0.head);
780 if (args->v0.head > disp->head.nr)
785 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
786 args->v0.head, sizeof(*dmac),
788 *pobject = nv_object(dmac);
/* Object-function table for the overlay channel class. */
795 struct nv50_disp_chan_impl
796 nv50_disp_ovly_ofuncs = {
797 .base.ctor = nv50_disp_ovly_ctor,
798 .base.dtor = nv50_disp_dmac_dtor,
799 .base.init = nv50_disp_dmac_init,
800 .base.fini = nv50_disp_dmac_fini,
801 .base.ntfy = nv50_disp_chan_ntfy,
802 .base.map = nv50_disp_chan_map,
803 .base.rd32 = nv50_disp_chan_rd32,
804 .base.wr32 = nv50_disp_chan_wr32,
806 .attach = nv50_disp_dmac_object_attach,
807 .detach = nv50_disp_dmac_object_detach,
810 /*******************************************************************************
811 * EVO PIO channel base class
812 ******************************************************************************/
/* Constructor for PIO channels: thin wrapper over the common channel
 * constructor (no pushbuffer/DMA object involved). */
815 nv50_disp_pioc_create_(struct nvkm_object *parent,
816 struct nvkm_object *engine,
817 struct nvkm_oclass *oclass, int head,
818 int length, void **pobject)
820 return nv50_disp_chan_create_(parent, engine, oclass, head,
/* Destructor for PIO channels: common channel teardown only. */
825 nv50_disp_pioc_dtor(struct nvkm_object *object)
827 struct nv50_disp_pioc *pioc = (void *)object;
828 nv50_disp_chan_destroy(&pioc->base);
/* PIO-channel init: resets the channel (0x2000), waits for the state
 * field to clear, then enables it (0x1) and waits for the state field
 * to report "active" (0x10000).  Each wait has a 2s timeout. */
832 nv50_disp_pioc_init(struct nvkm_object *object)
834 struct nv50_disp *disp = (void *)object->engine;
835 struct nv50_disp_pioc *pioc = (void *)object;
836 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
837 struct nvkm_device *device = subdev->device;
838 int chid = pioc->base.chid;
841 ret = nv50_disp_chan_init(&pioc->base);
/* reset the channel */
845 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
846 if (nvkm_msec(device, 2000,
847 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
850 nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
851 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
/* activate the channel and wait for the state machine to follow */
855 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
856 if (nvkm_msec(device, 2000,
857 u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
858 if ((tmp & 0x00030000) == 0x00010000)
861 nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
862 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
/* PIO-channel teardown: clears the enable bit and waits (2s) for the
 * channel state field to go idle before the common channel fini. */
870 nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
872 struct nv50_disp *disp = (void *)object->engine;
873 struct nv50_disp_pioc *pioc = (void *)object;
874 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
875 struct nvkm_device *device = subdev->device;
876 int chid = pioc->base.chid;
878 nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
879 if (nvkm_msec(device, 2000,
880 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
883 nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
884 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
889 return nv50_disp_chan_fini(&pioc->base, suspend);
892 /*******************************************************************************
893 * EVO immediate overlay channel objects
894 ******************************************************************************/
/* Constructor for the immediate-overlay PIO channel: unpacks v0 args,
 * validates the head, and creates a PIO channel on it.
 * NOTE(review): head check is `>` not `>=` - confirm upstream. */
897 nv50_disp_oimm_ctor(struct nvkm_object *parent,
898 struct nvkm_object *engine,
899 struct nvkm_oclass *oclass, void *data, u32 size,
900 struct nvkm_object **pobject)
903 struct nv50_disp_overlay_v0 v0;
905 struct nv50_disp *disp = (void *)engine;
906 struct nv50_disp_pioc *pioc;
909 nvif_ioctl(parent, "create disp overlay size %d\n", size);
910 if (nvif_unpack(args->v0, 0, 0, false)) {
911 nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
912 args->v0.version, args->v0.head);
913 if (args->v0.head > disp->head.nr)
918 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
919 sizeof(*pioc), (void **)&pioc);
920 *pobject = nv_object(pioc);
/* Object-function table for the immediate-overlay (PIO) channel; no
 * attach/detach hooks since PIO channels carry no DMA objects. */
927 struct nv50_disp_chan_impl
928 nv50_disp_oimm_ofuncs = {
929 .base.ctor = nv50_disp_oimm_ctor,
930 .base.dtor = nv50_disp_pioc_dtor,
931 .base.init = nv50_disp_pioc_init,
932 .base.fini = nv50_disp_pioc_fini,
933 .base.ntfy = nv50_disp_chan_ntfy,
934 .base.map = nv50_disp_chan_map,
935 .base.rd32 = nv50_disp_chan_rd32,
936 .base.wr32 = nv50_disp_chan_wr32,
940 /*******************************************************************************
941 * EVO cursor channel objects
942 ******************************************************************************/
/* Constructor for the cursor PIO channel: unpacks v0 args, validates
 * the head, and creates a PIO channel on it.
 * NOTE(review): head check is `>` not `>=` - confirm upstream. */
945 nv50_disp_curs_ctor(struct nvkm_object *parent,
946 struct nvkm_object *engine,
947 struct nvkm_oclass *oclass, void *data, u32 size,
948 struct nvkm_object **pobject)
951 struct nv50_disp_cursor_v0 v0;
953 struct nv50_disp *disp = (void *)engine;
954 struct nv50_disp_pioc *pioc;
957 nvif_ioctl(parent, "create disp cursor size %d\n", size);
958 if (nvif_unpack(args->v0, 0, 0, false)) {
959 nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
960 args->v0.version, args->v0.head);
961 if (args->v0.head > disp->head.nr)
966 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
967 sizeof(*pioc), (void **)&pioc);
968 *pobject = nv_object(pioc);
/* Object-function table for the cursor (PIO) channel. */
975 struct nv50_disp_chan_impl
976 nv50_disp_curs_ofuncs = {
977 .base.ctor = nv50_disp_curs_ctor,
978 .base.dtor = nv50_disp_pioc_dtor,
979 .base.init = nv50_disp_pioc_init,
980 .base.fini = nv50_disp_pioc_fini,
981 .base.ntfy = nv50_disp_chan_ntfy,
982 .base.map = nv50_disp_chan_map,
983 .base.rd32 = nv50_disp_chan_rd32,
984 .base.wr32 = nv50_disp_chan_wr32,
988 /*******************************************************************************
989 * Base display object
990 ******************************************************************************/
/* SCANOUTPOS method: reports blanking/total timings for a head from
 * the hardware shadow registers, plus a timestamped vline/hline sample
 * (two ktime reads bracket the register reads so userspace can bound
 * the sampling moment). */
993 nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
995 struct nvkm_device *device = disp->base.engine.subdev.device;
/* per-head timing shadows, stride 0x540 */
996 const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
997 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
998 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540));
1000 struct nv04_disp_scanoutpos_v0 v0;
1004 nvif_ioctl(object, "disp scanoutpos size %d\n", size);
1005 if (nvif_unpack(args->v0, 0, 0, false)) {
1006 nvif_ioctl(object, "disp scanoutpos vers %d\n",
/* high halfword = vertical, low halfword = horizontal */
1008 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
1009 args->v0.hblanke = (blanke & 0x0000ffff);
1010 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
1011 args->v0.hblanks = (blanks & 0x0000ffff);
1012 args->v0.vtotal = ( total & 0xffff0000) >> 16;
1013 args->v0.htotal = ( total & 0x0000ffff);
1014 args->v0.time[0] = ktime_to_ns(ktime_get());
1015 args->v0.vline = /* vline read locks hline */
1016 nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
1017 args->v0.time[1] = ktime_to_ns(ktime_get());
1019 nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
/* Top-level display method dispatcher.  Unpacks either a v0 (head-
 * addressed) or v1 (output-addressed via dcb hash type/mask) request,
 * resolves the target output for v1, then routes to the per-feature
 * handlers (DAC/SOR/PIOR power, HDA/HDMI, LVDS script, DP power).
 * NOTE(review): listing is missing the -EINVAL fallthroughs and some
 * closing braces - comments cover only the visible control flow. */
1027 nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
1029 const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
1031 struct nv50_disp_mthd_v0 v0;
1032 struct nv50_disp_mthd_v1 v1;
1034 struct nv50_disp *disp = (void *)object->engine;
1035 struct nvkm_output *outp = NULL;
1036 struct nvkm_output *temp;
/* only the NV50_DISP_MTHD entry point is accepted */
1040 if (mthd != NV50_DISP_MTHD)
1043 nvif_ioctl(object, "disp mthd size %d\n", size);
1044 if (nvif_unpack(args->v0, 0, 0, true)) {
1045 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
1046 args->v0.version, args->v0.method, args->v0.head);
1047 mthd = args->v0.method;
1048 head = args->v0.head;
1050 if (nvif_unpack(args->v1, 1, 1, true)) {
1051 nvif_ioctl(object, "disp mthd vers %d mthd %02x "
1052 "type %04x mask %04x\n",
1053 args->v1.version, args->v1.method,
1054 args->v1.hasht, args->v1.hashm);
1055 mthd = args->v1.method;
1056 type = args->v1.hasht;
1057 mask = args->v1.hashm;
/* head index is encoded in bits 11:8 of the hash mask */
1058 head = ffs((mask >> 8) & 0x0f) - 1;
1062 if (head < 0 || head >= disp->head.nr)
/* find the output whose dcb hash matches the v1 type/mask */
1066 list_for_each_entry(temp, &disp->base.outp, head) {
1067 if ((temp->info.hasht == type) &&
1068 (temp->info.hashm & mask) == mask) {
1078 case NV50_DISP_SCANOUTPOS:
1079 return impl->head.scanoutpos(object, disp, data, size, head);
/* outp-addressed methods: multiplying by !!outp forces the default
 * (error) path when no matching output was found */
1084 switch (mthd * !!outp) {
1085 case NV50_DISP_MTHD_V1_DAC_PWR:
1086 return disp->dac.power(object, disp, data, size, head, outp);
1087 case NV50_DISP_MTHD_V1_DAC_LOAD:
1088 return disp->dac.sense(object, disp, data, size, head, outp);
1089 case NV50_DISP_MTHD_V1_SOR_PWR:
1090 return disp->sor.power(object, disp, data, size, head, outp);
1091 case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
/* optional per-chipset hooks; absent means unsupported */
1092 if (!disp->sor.hda_eld)
1094 return disp->sor.hda_eld(object, disp, data, size, head, outp);
1095 case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
1096 if (!disp->sor.hdmi)
1098 return disp->sor.hdmi(object, disp, data, size, head, outp);
1099 case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
1101 struct nv50_disp_sor_lvds_script_v0 v0;
1103 nvif_ioctl(object, "disp sor lvds script size %d\n", size);
1104 if (nvif_unpack(args->v0, 0, 0, false)) {
1105 nvif_ioctl(object, "disp sor lvds script "
1106 "vers %d name %04x\n",
1107 args->v0.version, args->v0.script);
/* stash script id for later use during modeset */
1108 disp->sor.lvdsconf = args->v0.script;
1114 case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
1115 struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
1117 struct nv50_disp_sor_dp_pwr_v0 v0;
1119 nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
1120 if (nvif_unpack(args->v0, 0, 0, false)) {
1121 nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
1122 args->v0.version, args->v0.state);
1123 if (args->v0.state == 0) {
/* power down: stop IRQ handling, drop link power, mark
 * link training as not done */
1124 nvkm_notify_put(&outpdp->irq);
1125 outpdp->func->lnk_pwr(outpdp, 0);
1126 atomic_set(&outpdp->lt.done, 0);
1129 if (args->v0.state != 0) {
/* power up: retrain the DP link */
1130 nvkm_output_dp_train(&outpdp->base, 0, true);
1137 case NV50_DISP_MTHD_V1_PIOR_PWR:
1138 if (!disp->pior.power)
1140 return disp->pior.power(object, disp, data, size, head, outp);
/* Constructor for the top-level display user object: creates a parent
 * exposing the channel classes and allocates the display RAMHT inside
 * the caller-supplied instance memory. */
1149 nv50_disp_main_ctor(struct nvkm_object *parent,
1150 struct nvkm_object *engine,
1151 struct nvkm_oclass *oclass, void *data, u32 size,
1152 struct nvkm_object **pobject)
1154 struct nv50_disp *disp = (void *)engine;
1155 struct nv50_disp_base *base;
1156 struct nvkm_device *device = disp->base.engine.subdev.device;
/* parent is the per-client instmem gpuobj backing the RAMHT */
1157 struct nvkm_gpuobj *instmem = (void *)parent;
1160 ret = nvkm_parent_create(parent, engine, oclass, 0,
1161 disp->sclass, 0, &base);
1162 *pobject = nv_object(base);
1166 return nvkm_ramht_new(device, 0x1000, 0, instmem, &base->ramht);
/* Destructor for the display user object: frees the RAMHT then the
 * parent object. */
1170 nv50_disp_main_dtor(struct nvkm_object *object)
1172 struct nv50_disp_base *base = (void *)object;
1173 nvkm_ramht_del(&base->ramht);
1174 nvkm_parent_destroy(&base->base);
/* Display init: mirrors per-unit capability registers into the EVO
 * shadow area, reclaims the display from the VBIOS if it owns it,
 * points the hardware at the RAMHT, and arms supervisor interrupts. */
1178 nv50_disp_main_init(struct nvkm_object *object)
1180 struct nv50_disp *disp = (void *)object->engine;
1181 struct nv50_disp_base *base = (void *)object;
1182 struct nvkm_device *device = disp->base.engine.subdev.device;
1186 ret = nvkm_parent_init(&base->base);
1190 /* The below segments of code copying values from one register to
1191 * another appear to inform EVO of the display capabilities or
1192 * something similar. NFI what the 0x614004 caps are for..
1194 tmp = nvkm_rd32(device, 0x614004);
1195 nvkm_wr32(device, 0x610184, tmp);
/* per-head capability mirrors (4 registers per head) */
1198 for (i = 0; i < disp->head.nr; i++) {
1199 tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
1200 nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
1201 tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
1202 nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
1203 tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
1204 nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
1205 tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
1206 nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
/* per-DAC capability mirrors */
1210 for (i = 0; i < disp->dac.nr; i++) {
1211 tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
1212 nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
/* per-SOR capability mirrors */
1216 for (i = 0; i < disp->sor.nr; i++) {
1217 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
1218 nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
/* per-PIOR capability mirrors */
1222 for (i = 0; i < disp->pior.nr; i++) {
1223 tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
1224 nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
1227 /* steal display away from vbios, or something like that */
1228 if (nvkm_rd32(device, 0x610024) & 0x00000100) {
1229 nvkm_wr32(device, 0x610024, 0x00000100);
1230 nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
1231 if (nvkm_msec(device, 2000,
1232 if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
1238 /* point at display engine memory area (hash table, objects) */
1239 nvkm_wr32(device, 0x610010, (base->ramht->gpuobj->addr >> 8) | 9);
1241 /* enable supervisor interrupts, disable everything else */
1242 nvkm_wr32(device, 0x61002c, 0x00000370);
1243 nvkm_wr32(device, 0x610028, 0x00000000);
/* Display fini: masks and acks all display interrupts, then runs the
 * generic parent fini. */
1248 nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
1250 struct nv50_disp *disp = (void *)object->engine;
1251 struct nv50_disp_base *base = (void *)object;
1252 struct nvkm_device *device = disp->base.engine.subdev.device;
1254 /* disable all interrupts */
1255 nvkm_wr32(device, 0x610024, 0x00000000);
1256 nvkm_wr32(device, 0x610020, 0x00000000);
1258 return nvkm_parent_fini(&base->base, suspend);
/* Object-function table for the top-level display user object. */
1262 nv50_disp_main_ofuncs = {
1263 .ctor = nv50_disp_main_ctor,
1264 .dtor = nv50_disp_main_dtor,
1265 .init = nv50_disp_main_init,
1266 .fini = nv50_disp_main_fini,
1267 .mthd = nv50_disp_main_mthd,
1268 .ntfy = nvkm_disp_ntfy,
/* Class list binding the NV50_DISP handle to the ofuncs above. */
1271 static struct nvkm_oclass
1272 nv50_disp_main_oclass[] = {
1273 { NV50_DISP, &nv50_disp_main_ofuncs },
/* EVO channel classes exposed to clients by the NV50 display engine:
 * core/base/overlay DMA channels plus the overlay-immediate and cursor
 * PIO channels.
 */
1277 static struct nvkm_oclass
1278 nv50_disp_sclass[] = {
1279 { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
1280 { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
1281 { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
1282 { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
1283 { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
1287 /*******************************************************************************
1288 * Display context, tracks instmem allocation and prevents more than one
1289 * client using the display hardware at any time.
1290 ******************************************************************************/
/* Display context constructor.  Channel objects share their parent's
 * context (just bump the refcount); a real client gets a 64KiB gpuobj
 * heap, and the engine's context list is checked under the subdev mutex
 * so only one client can own the display at a time.
 * NOTE(review): several original lines (returns, braces) are not visible
 * in this extracted view.
 */
1293 nv50_disp_data_ctor(struct nvkm_object *parent,
1294 struct nvkm_object *engine,
1295 struct nvkm_oclass *oclass, void *data, u32 size,
1296 struct nvkm_object **pobject)
1298 struct nv50_disp *disp = (void *)engine;
1299 struct nvkm_gpuobj *gpuobj;
1302 /* no context needed for channel objects... */
1303 if (nv_mclass(parent) != NV_DEVICE) {
1304 atomic_inc(&parent->refcount);
1309 /* allocate display hardware to client */
1310 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, NULL,
1311 0x10000, 0x10000, NVOBJ_FLAG_HEAP,
1313 *pobject = nv_object(gpuobj);
/* single-owner policy: a non-empty context list means another client
 * already holds the display hardware */
1314 mutex_lock(&nv_subdev(disp)->mutex);
1315 if (!list_empty(&nv_engine(disp)->contexts))
1317 mutex_unlock(&nv_subdev(disp)->mutex);
/* Context class: plain gpuobj semantics except for the ownership-checking
 * constructor above.
 */
1322 nv50_disp_cclass = {
1323 .ofuncs = &(struct nvkm_ofuncs) {
1324 .ctor = nv50_disp_data_ctor,
1325 .dtor = _nvkm_gpuobj_dtor,
1326 .init = _nvkm_gpuobj_init,
1327 .fini = _nvkm_gpuobj_fini,
1328 .rd32 = _nvkm_gpuobj_rd32,
1329 .wr32 = _nvkm_gpuobj_wr32,
1333 /*******************************************************************************
1334 * Display engine implementation
1335 ******************************************************************************/
/* Disable the per-head vblank interrupt: clear bit (2 + head) in 0x61002c. */
1338 nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
1340 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
1341 struct nvkm_device *device = disp->engine.subdev.device;
1342 nvkm_mask(device, 0x61002c, (4 << head), 0);
/* Enable the per-head vblank interrupt: set bit (2 + head) in 0x61002c. */
1346 nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
1348 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
1349 struct nvkm_device *device = disp->engine.subdev.device;
1350 nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
/* Event hooks wiring the generic vblank notifier to the register
 * enable/disable helpers above.
 */
1353 const struct nvkm_event_func
1354 nv50_disp_vblank_func = {
1355 .ctor = nvkm_disp_vblank_ctor,
1356 .init = nv50_disp_vblank_init,
1357 .fini = nv50_disp_vblank_fini,
/* Human-readable names for the error "type" field decoded from
 * register 0x610080 (bits 14:12) in nv50_disp_intr_error().
 */
1360 static const struct nvkm_enum
1361 nv50_disp_intr_error_type[] = {
1362 { 3, "ILLEGAL_MTHD" },
1363 { 4, "INVALID_VALUE" },
1364 { 5, "INVALID_STATE" },
1365 { 7, "INVALID_HANDLE" },
/* Names for the error "code" field (bits 23:16 of 0x610080); entries,
 * if any, are not visible in this extracted view.
 */
1369 static const struct nvkm_enum
1370 nv50_disp_intr_error_code[] = {
/* Handle an EVO channel method-error interrupt for channel 'chid':
 * decode type/code/method from the per-channel 0x610080/0x610084 pair,
 * log it, dump the offending channel's method state, then ack the
 * interrupt and reset the error latch.
 */
1376 nv50_disp_intr_error(struct nv50_disp *disp, int chid)
1378 struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
1379 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
1380 struct nvkm_device *device = subdev->device;
1381 u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
1382 u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
1383 u32 code = (addr & 0x00ff0000) >> 16;
1384 u32 type = (addr & 0x00007000) >> 12;
1385 u32 mthd = (addr & 0x00000ffc);
1386 const struct nvkm_enum *ec, *et;
1388 et = nvkm_enum_find(nv50_disp_intr_error_type, type);
1389 ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
1392 "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
1393 type, et ? et->name : "", code, ec ? ec->name : "",
/* dump method state for the channel class this chid falls into
 * (core/base/ovly); the selecting conditionals are in hidden lines */
1399 nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 0,
1409 nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 1,
1419 nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 3,
/* ack the error interrupt and reset the per-channel error latch */
1427 nvkm_wr32(device, 0x610020, 0x00010000 << chid);
1428 nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
/* Translate an (or, ctrl-word) pair into a DCB output type + hash mask,
 * then find the matching nvkm_output on the display's output list and
 * look up its VBIOS output table entry (returned via *data/info).
 * NOTE(review): the DAC/SOR/PIOR dispatch and return statements are in
 * lines not visible in this extracted view.
 */
1431 static struct nvkm_output *
1432 exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
1433 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
1434 struct nvbios_outp *info)
1436 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
1437 struct nvkm_bios *bios = subdev->device->bios;
1438 struct nvkm_output *outp;
1442 type = DCB_OUTPUT_ANALOG;
/* SOR: decode output protocol and sublink mask from ctrl bits 11:8 */
1446 switch (ctrl & 0x00000f00) {
1447 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
1448 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
1449 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
1450 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
1451 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
1452 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
1454 nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
/* PIOR: external encoder, type comes from the probed pior table */
1462 switch (ctrl & 0x00000f00) {
1463 case 0x00000000: type |= disp->pior.type[or]; break;
1465 nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
/* build the DCB hash mask: link bits [7:6], or bit, head bit */
1470 mask = 0x00c0 & (mask << 6);
1471 mask |= 0x0001 << or;
1472 mask |= 0x0100 << head;
1474 list_for_each_entry(outp, &disp->base.outp, head) {
1475 if ((outp->info.hasht & 0xff) == type &&
1476 (outp->info.hashm & mask) == mask) {
1477 *data = nvbios_outp_match(bios, outp->info.hasht,
1479 ver, hdr, cnt, len, info);
/* Find which output (DAC, then SOR, then PIOR) currently drives 'head'
 * by scanning the armed control registers, then execute VBIOS output
 * script 'id' for that output.  Returns the output, or (per hidden
 * lines) NULL when no output owns the head.
 */
1489 static struct nvkm_output *
1490 exec_script(struct nv50_disp *disp, int head, int id)
1492 struct nvkm_device *device = disp->base.engine.subdev.device;
1493 struct nvkm_bios *bios = device->bios;
1494 struct nvkm_output *outp;
1495 struct nvbios_outp info;
1496 u8 ver, hdr, cnt, len;
/* DAC scan: stop at the first DAC whose ctrl word claims this head */
1502 for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
1503 ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
/* SOR scan: register stride/base differs on pre-G90 / G92 / GT200 */
1506 if (!(ctrl & (1 << head))) {
1507 if (nv_device(disp)->chipset < 0x90 ||
1508 nv_device(disp)->chipset == 0x92 ||
1509 nv_device(disp)->chipset == 0xa0) {
1514 for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
1515 ctrl = nvkm_rd32(device, reg + (i * 8));
/* PIOR scan (external encoders) */
1520 if (!(ctrl & (1 << head))) {
1521 for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
1522 ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
1526 if (!(ctrl & (1 << head)))
1530 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
1532 struct nvbios_init init = {
1533 .subdev = nv_subdev(disp),
1535 .offset = info.script[id],
1536 .outp = &outp->info,
/* Like exec_script(), but for pixel-clock-dependent VBIOS scripts:
 * locate the output driving 'head' (DAC/SOR/PIOR ctrl scan), compute
 * the output configuration word (*conf) from the ctrl bits or the
 * stored LVDS config, then match and run the clock-comparison script
 * for 'pclk'.  id == 0xff means "lookup only, don't execute".
 */
1547 static struct nvkm_output *
1548 exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
1550 struct nvkm_device *device = disp->base.engine.subdev.device;
1551 struct nvkm_bios *bios = device->bios;
1552 struct nvkm_output *outp;
1553 struct nvbios_outp info1;
1554 struct nvbios_ocfg info2;
1555 u8 ver, hdr, cnt, len;
/* DAC scan (note: 0x610b58 here vs 0x610b5c in exec_script) */
1561 for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
1562 ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
/* SOR scan, chipset-dependent register base as in exec_script */
1565 if (!(ctrl & (1 << head))) {
1566 if (nv_device(disp)->chipset < 0x90 ||
1567 nv_device(disp)->chipset == 0x92 ||
1568 nv_device(disp)->chipset == 0xa0) {
1573 for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
1574 ctrl = nvkm_rd32(device, reg + (i * 8));
/* PIOR scan */
1579 if (!(ctrl & (1 << head))) {
1580 for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
1581 ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
1585 if (!(ctrl & (1 << head)))
1589 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
/* derive *conf for internal (SOR) outputs from protocol/ctrl bits */
1593 if (outp->info.location == 0) {
1594 switch (outp->info.type) {
1595 case DCB_OUTPUT_TMDS:
1596 *conf = (ctrl & 0x00000f00) >> 8;
1600 case DCB_OUTPUT_LVDS:
1601 *conf = disp->sor.lvdsconf;
1604 *conf = (ctrl & 0x00000f00) >> 8;
1606 case DCB_OUTPUT_ANALOG:
1612 *conf = (ctrl & 0x00000f00) >> 8;
/* find the ocfg entry for *conf, then the clkcmp script for pclk */
1616 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
1617 if (data && id < 0xff) {
1618 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
1620 struct nvbios_init init = {
1621 .subdev = nv_subdev(disp),
1624 .outp = &outp->info,
/* Supervisor 1, per-head: run VBIOS output script 1 (encoder detach). */
1637 nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
1639 exec_script(disp, head, 1);
/* Supervisor 2, per-head step 0: run VBIOS output script 2 (detach),
 * and for DP outputs additionally run the full power-down script
 * (info.script[4]) and clear the link-trained flag.
 */
1643 nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
1645 struct nvkm_output *outp = exec_script(disp, head, 2);
1647 /* the binary driver does this outside of the supervisor handling
1648 * (after the third supervisor from a detach). we (currently?)
1649 * allow both detach/attach to happen in the same set of
1650 * supervisor interrupts, so it would make sense to execute this
1651 * (full power down?) script after all the detach phases of the
1652 * supervisor handling. like with training if needed from the
1653 * second supervisor, nvidia doesn't do this, so who knows if it's
1654 * entirely safe, but it does appear to work..
1656 * without this script being run, on some configurations i've
1657 * seen, switching from DP to TMDS on a DP connector may result
1658 * in a blank screen (SOR_PWR off/on can restore it)
1660 if (outp && outp->info.type == DCB_OUTPUT_DP) {
1661 struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
1662 struct nvbios_init init = {
1663 .subdev = nv_subdev(disp),
1664 .bios = nvkm_bios(disp),
1665 .outp = &outp->info,
1667 .offset = outpdp->info.script[4],
/* link must be retrained before the next attach */
1672 atomic_set(&outpdp->lt.done, 0);
/* Supervisor 2, per-head step 1: program the head's video PLL to the
 * pixel clock latched in the armed state (0x610ad0 + head stride).
 */
1677 nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
1679 struct nvkm_device *device = disp->base.engine.subdev.device;
1680 struct nvkm_devinit *devinit = device->devinit;
1681 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1683 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
/* Program DP link timing for a mode: symbols-per-hblank/vblank
 * watermarks, then search TU (transfer unit) sizes 64..32 for the
 * integer/fractional/remainder (VTUi/VTUf/VTUa) combination whose
 * valid-symbol count best matches the data/symbol-rate ratio, and
 * write the winning values into SOR_DP_CTRL/SCFG.
 * NOTE(review): many intermediate lines (loop bookkeeping, best_*
 * updates, braces) are not visible in this extracted view.
 */
1687 nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
1688 struct dcb_output *outp, u32 pclk)
1690 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
1691 struct nvkm_device *device = subdev->device;
1692 const int link = !(outp->sorconf.link & 1);
1693 const int or = ffs(outp->or) - 1;
1694 const u32 soff = ( or * 0x800);
1695 const u32 loff = (link * 0x080) + soff;
1696 const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
1697 const u32 symbol = 100000;
1698 const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
1699 const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
1700 const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
1701 u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
1702 u32 clksor = nvkm_rd32(device, 0x614300 + soff);
1703 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
1704 int TU, VTUi, VTUf, VTUa;
1705 u64 link_data_rate, link_ratio, unk;
1706 u32 best_diff = 64 * symbol;
1707 u32 link_nr, link_bw, bits;
/* link bandwidth (kHz per lane) and active lane count from hw state */
1710 link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
1711 link_nr = hweight32(dpctrl & 0x000f0000);
1713 /* symbols/hblank - algorithm taken from comments in tegra driver */
1714 value = vblanke + vactive - vblanks - 7;
1715 value = value * link_bw;
1716 do_div(value, pclk);
1717 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
1718 nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
1720 /* symbols/vblank - algorithm taken from comments in tegra driver */
1721 value = vblanks - vblanke - 25;
1722 value = value * link_bw;
1723 do_div(value, pclk);
1724 value = value - ((36 / link_nr) + 3) - 1;
1725 nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
1727 /* watermark / activesym */
/* bits per pixel from the SOR depth field (30/24/…) */
1728 if ((ctrl & 0xf0000) == 0x60000) bits = 30;
1729 else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
1732 link_data_rate = (pclk * bits / 8) / link_nr;
1734 /* calculate ratio of packed data rate to link symbol rate */
1735 link_ratio = link_data_rate * symbol;
1736 do_div(link_ratio, link_bw);
1738 for (TU = 64; TU >= 32; TU--) {
1739 /* calculate average number of valid symbols in each TU */
1740 u32 tu_valid = link_ratio * TU;
1743 /* find a hw representation for the fraction.. */
1744 VTUi = tu_valid / symbol;
1745 calc = VTUi * symbol;
1746 diff = tu_valid - calc;
1748 if (diff >= (symbol / 2)) {
1749 VTUf = symbol / (symbol - diff);
1750 if (symbol - (VTUf * diff))
1755 calc += symbol - (symbol / VTUf);
1763 VTUf = min((int)(symbol / diff), 15);
1764 calc += symbol / VTUf;
1767 diff = calc - tu_valid;
1769 /* no remainder, but the hw doesn't like the fractional
1770 * part to be zero. decrement the integer part and
1771 * have the fraction add a whole symbol back
1778 if (diff < best_diff) {
/* bestTU == 0 after the loop means no usable config was found */
1790 nvkm_error(subdev, "unable to find suitable dp config\n");
1794 /* XXX close to vbios numbers, but not right */
1795 unk = (symbol - link_ratio) * bestTU;
1797 do_div(unk, symbol);
1798 do_div(unk, symbol);
1801 nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
1802 nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
1804 bestVTUi << 8 | unk);
/* Supervisor 2, per-head step 2 (encoder attach): look up the output
 * and its config, retrain DP links if needed before attach, run the
 * clkcmp[0] script, then program the head/output routing registers.
 */
1808 nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
1810 struct nvkm_device *device = disp->base.engine.subdev.device;
1811 struct nvkm_output *outp;
1812 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1813 u32 hval, hreg = 0x614200 + (head * 0x800);
/* id 0xff: lookup output/conf only, no script execution yet */
1817 outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
1821 /* we allow both encoder attach and detach operations to occur
1822 * within a single supervisor (ie. modeset) sequence. the
1823 * encoder detach scripts quite often switch off power to the
1824 * lanes, which requires the link to be re-trained.
1826 * this is not generally an issue as the sink "must" (heh)
1827 * signal an irq when it's lost sync so the driver can
1830 * however, on some boards, if one does not configure at least
1831 * the gpu side of the link *before* attaching, then various
1832 * things can go horribly wrong (PDISP disappearing from mmio,
1833 * third supervisor never happens, etc).
1835 * the solution is simply to retrain here, if necessary. last
1836 * i checked, the binary driver userspace does not appear to
1837 * trigger this situation (it forces an UPDATE between steps).
1839 if (outp->info.type == DCB_OUTPUT_DP) {
1840 u32 soff = (ffs(outp->info.or) - 1) * 0x08;
1843 if (outp->info.location == 0) {
1844 ctrl = nvkm_rd32(device, 0x610794 + soff);
1847 ctrl = nvkm_rd32(device, 0x610b80 + soff);
/* datarate = pclk * bits-per-pixel from the depth field */
1851 switch ((ctrl & 0x000f0000) >> 16) {
1852 case 6: datarate = pclk * 30; break;
1853 case 5: datarate = pclk * 24; break;
1856 datarate = pclk * 18;
/* NOTE(review): 'datarate / soff' looks wrong — soff is a register
 * byte-offset ((ffs(or)-1)*8) and is 0 for OR 0, which would divide
 * by zero; a lane/link divisor seems intended.  TODO confirm against
 * upstream nouveau history before changing. */
1860 if (nvkm_output_dp_train(outp, datarate / soff, true))
1861 OUTP_ERR(outp, "link not trained before attach");
/* now actually execute the clkcmp[0] script for this pclk */
1864 exec_clkcmp(disp, head, 0, pclk, &conf);
/* pick the routing register/value per output type: DAC / SOR / PIOR */
1866 if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
1867 oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
1872 if (!outp->info.location) {
1873 if (outp->info.type == DCB_OUTPUT_DP)
1874 nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
1875 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
1876 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1880 oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
1886 nvkm_mask(device, hreg, 0x0000000f, hval);
1887 nvkm_mask(device, oreg, mask, oval);
1890 /* If programming a TMDS output on a SOR that can also be configured for
1891 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
1893 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
1894 * the VBIOS scripts on at least one board I have only switch it off on
1895 * link 0, causing a blank display if the output has previously been
1896 * programmed for DisplayPort.
/* When driving TMDS on a SOR that the DCB also lists as DP-capable,
 * force the DP enable bit off for this link (see the comment banner
 * above for why the VBIOS scripts alone are not sufficient).
 */
1899 nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
1900 struct dcb_output *outp)
1902 struct nvkm_device *device = disp->base.engine.subdev.device;
1903 struct nvkm_bios *bios = device->bios;
1904 const int link = !(outp->sorconf.link & 1);
1905 const int or = ffs(outp->or) - 1;
1906 const u32 loff = (or * 0x800) + (link * 0x80);
1907 const u16 mask = (outp->sorconf.link << 6) | outp->or;
1908 struct dcb_output match;
/* only if a DP DCB entry exists for the same or/link combination */
1911 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
1912 nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
/* Supervisor 3, per-head: run the clkcmp[1] script for the new pixel
 * clock, then apply the TMDS-on-DP-capable-SOR workaround if needed.
 */
1916 nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
1918 struct nvkm_device *device = disp->base.engine.subdev.device;
1919 struct nvkm_output *outp;
1920 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1923 outp = exec_clkcmp(disp, head, 1, pclk, &conf);
1927 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
1928 nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
/* Workqueue handler for supervisor interrupts.  disp->super records
 * which stage (0x10/0x20/0x40 = supervisor 1/2/3) fired; 0x610030
 * indicates which heads are affected.  Runs the per-stage, per-head
 * modeset helpers above, then acks via 0x610030.
 */
1932 nv50_disp_intr_supervisor(struct work_struct *work)
1934 struct nv50_disp *disp =
1935 container_of(work, struct nv50_disp, supervisor);
1936 struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
1937 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
1938 struct nvkm_device *device = subdev->device;
1940 u32 super = nvkm_rd32(device, 0x610030);
1942 nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
/* supervisor 1: dump core state, run detach script per affected head */
1944 if (disp->super & 0x00000010) {
1945 nv50_disp_mthd_chan(disp, NV_DBG_DEBUG, 0, impl->mthd.core);
1946 for (head = 0; head < disp->head.nr; head++) {
1947 if (!(super & (0x00000020 << head)))
1949 if (!(super & (0x00000080 << head)))
1951 nv50_disp_intr_unk10_0(disp, head);
/* supervisor 2: detach, set VPLLs, then attach, per affected head */
1954 if (disp->super & 0x00000020) {
1955 for (head = 0; head < disp->head.nr; head++) {
1956 if (!(super & (0x00000080 << head)))
1958 nv50_disp_intr_unk20_0(disp, head);
1960 for (head = 0; head < disp->head.nr; head++) {
1961 if (!(super & (0x00000200 << head)))
1963 nv50_disp_intr_unk20_1(disp, head);
1965 for (head = 0; head < disp->head.nr; head++) {
1966 if (!(super & (0x00000080 << head)))
1968 nv50_disp_intr_unk20_2(disp, head);
/* supervisor 3: post-attach clock scripts */
1971 if (disp->super & 0x00000040) {
1972 for (head = 0; head < disp->head.nr; head++) {
1973 if (!(super & (0x00000080 << head)))
1975 nv50_disp_intr_unk40_0(disp, head);
/* signal the hw that supervisor processing is complete */
1979 nvkm_wr32(device, 0x610030, 0x80000000);
/* Top-level PDISP interrupt handler.  0x610020: bits 20:16 are per-
 * channel method errors, bits 4:0 per-channel notifications.  0x610024:
 * bits 2/3 are vblank for heads 0/1, bits 6:4 supervisor stages (the
 * heavy lifting is deferred to the supervisor work item).
 */
1983 nv50_disp_intr(struct nvkm_subdev *subdev)
1985 struct nv50_disp *disp = (void *)subdev;
1986 struct nvkm_device *device = disp->base.engine.subdev.device;
1987 u32 intr0 = nvkm_rd32(device, 0x610020);
1988 u32 intr1 = nvkm_rd32(device, 0x610024);
1990 while (intr0 & 0x001f0000) {
1991 u32 chid = __ffs(intr0 & 0x001f0000) - 16;
1992 nv50_disp_intr_error(disp, chid);
1993 intr0 &= ~(0x00010000 << chid);
1996 while (intr0 & 0x0000001f) {
1997 u32 chid = __ffs(intr0 & 0x0000001f);
1998 nv50_disp_chan_uevent_send(disp, chid);
1999 intr0 &= ~(0x00000001 << chid);
2002 if (intr1 & 0x00000004) {
2003 nvkm_disp_vblank(&disp->base, 0);
2004 nvkm_wr32(device, 0x610024, 0x00000004);
2007 if (intr1 & 0x00000008) {
2008 nvkm_disp_vblank(&disp->base, 1);
2009 nvkm_wr32(device, 0x610024, 0x00000008);
/* supervisor: record the stage, ack, and schedule the work handler */
2012 if (intr1 & 0x00000070) {
2013 disp->super = (intr1 & 0x00000070);
2014 schedule_work(&disp->supervisor);
2015 nvkm_wr32(device, 0x610024, disp->super);
/* Engine constructor: create the base display engine (2 heads, "PDISP"),
 * set up the channel-completion event, wire the class lists, interrupt
 * handler and supervisor work item, and install the NV50 DAC/SOR/PIOR
 * power/sense helpers.
 */
2020 nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
2021 struct nvkm_oclass *oclass, void *data, u32 size,
2022 struct nvkm_object **pobject)
2024 struct nv50_disp *disp;
2027 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
2029 *pobject = nv_object(disp);
/* one event per EVO channel (9 channels on nv50) */
2033 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &disp->uevent);
2037 nv_engine(disp)->sclass = nv50_disp_main_oclass;
2038 nv_engine(disp)->cclass = &nv50_disp_cclass;
2039 nv_subdev(disp)->intr = nv50_disp_intr;
2040 INIT_WORK(&disp->supervisor, nv50_disp_intr_supervisor);
2041 disp->sclass = nv50_disp_sclass;
2046 disp->dac.power = nv50_dac_power;
2047 disp->dac.sense = nv50_dac_sense;
2048 disp->sor.power = nv50_sor_power;
2049 disp->pior.power = nv50_pior_power;
2053 struct nvkm_oclass *
2054 nv50_disp_oclass = &(struct nv50_disp_impl) {
2055 .base.base.handle = NV_ENGINE(DISP, 0x50),
2056 .base.base.ofuncs = &(struct nvkm_ofuncs) {
2057 .ctor = nv50_disp_ctor,
2058 .dtor = _nvkm_disp_dtor,
2059 .init = _nvkm_disp_init,
2060 .fini = _nvkm_disp_fini,
2062 .base.outp.internal.crt = nv50_dac_output_new,
2063 .base.outp.internal.tmds = nv50_sor_output_new,
2064 .base.outp.internal.lvds = nv50_sor_output_new,
2065 .base.outp.external.tmds = nv50_pior_output_new,
2066 .base.outp.external.dp = nv50_pior_dp_new,
2067 .base.vblank = &nv50_disp_vblank_func,
2068 .mthd.core = &nv50_disp_core_mthd_chan,
2069 .mthd.base = &nv50_disp_base_mthd_chan,
2070 .mthd.ovly = &nv50_disp_ovly_mthd_chan,
2071 .mthd.prev = 0x000004,
2072 .head.scanoutpos = nv50_disp_main_scanoutpos,