/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_vm.h"

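/* Software-method dispatch tables: each engine registers the object
 * classes it implements with nouveau_gpuobj_class_new(), and can attach
 * handlers for individual methods of a class with
 * nouveau_gpuobj_mthd_new().  Registered handlers are looked up and
 * invoked through nouveau_gpuobj_mthd_call()/nouveau_gpuobj_mthd_call2()
 * below.
 */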
struct nouveau_gpuobj_method {
        struct list_head head;
        u32 mthd;
        int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
        struct list_head head;
        struct list_head methods;
        u32 id;
        u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_class *oc;

        oc = kzalloc(sizeof(*oc), GFP_KERNEL);
        if (!oc)
                return -ENOMEM;

        INIT_LIST_HEAD(&oc->methods);
        oc->id = class;
        oc->engine = engine;
        list_add(&oc->head, &dev_priv->classes);
        return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
                        int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        return -EINVAL;

found:
        om = kzalloc(sizeof(*om), GFP_KERNEL);
        if (!om)
                return -ENOMEM;

        om->mthd = mthd;
        om->exec = exec;
        list_add(&om->head, &oc->methods);
        return 0;
}
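
/* A minimal registration sketch (the handler and method offset below are
 * hypothetical, for illustration only; the real users are the per-chipset
 * engine init paths):
 *
 *   static int example_mthd(struct nouveau_channel *chan,
 *                           u32 class, u32 mthd, u32 data)
 *   {
 *           return 0;
 *   }
 *
 *   nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
 *   nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, example_mthd);
 */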

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
                         u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id != class)
                        continue;

                list_for_each_entry(om, &oc->methods, head) {
                        if (om->mthd == mthd)
                                return om->exec(chan, class, mthd, data);
                }
        }

        return -ENOENT;
}

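/* As nouveau_gpuobj_mthd_call(), but resolves the channel from its id
 * first.  The channel lock is taken irqsave, so this variant should be
 * safe to call from interrupt context (e.g. a PFIFO ISR reporting a
 * software method).
 */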
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
                          u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (chid >= 0 && chid < pfifo->channels)
                chan = dev_priv->channels.ptr[chid];
        if (chan)
                ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                   uint32_t size, int align, uint32_t flags,
                   struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *gpuobj;
        struct drm_mm_node *ramin = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                 chan ? chan->id : -1, size, align, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        if (!(flags & NVOBJ_FLAG_VM) && chan) {
                ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                if (ramin)
                        ramin = drm_mm_get_block(ramin, size, align);
                if (!ramin) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }

                gpuobj->pinst = chan->ramin->pinst;
                if (gpuobj->pinst != ~0)
                        gpuobj->pinst += ramin->start;

                gpuobj->cinst = ramin->start;
                gpuobj->vinst = ramin->start + chan->ramin->vinst;
                gpuobj->node  = ramin;
        } else {
                ret = instmem->get(gpuobj, chan, size, align);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }

                ret = -ENOSYS;
                if (!(flags & NVOBJ_FLAG_DONT_MAP))
                        ret = instmem->map(gpuobj);
                if (ret)
                        gpuobj->pinst = ~0;

                gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        }

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        *gpuobj_ret = gpuobj;
        return 0;
}
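
/* Typical use of nouveau_gpuobj_new() (the RAMHT allocation later in this
 * file follows the same pattern); objects are released by dropping the
 * reference:
 *
 *   struct nouveau_gpuobj *obj = NULL;
 *   ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16,
 *                            NVOBJ_FLAG_ZERO_ALLOC, &obj);
 *   if (ret == 0)
 *           nouveau_gpuobj_ref(NULL, &obj);
 */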

int
nouveau_gpuobj_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        INIT_LIST_HEAD(&dev_priv->gpuobj_list);
        INIT_LIST_HEAD(&dev_priv->classes);
        spin_lock_init(&dev_priv->ramin_lock);
        dev_priv->ramin_base = ~0;

        return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om, *tm;
        struct nouveau_gpuobj_class *oc, *tc;

        NV_DEBUG(dev, "\n");

        list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
                list_for_each_entry_safe(om, tm, &oc->methods, head) {
                        list_del(&om->head);
                        kfree(om);
                }
                list_del(&oc->head);
                kfree(oc);
        }

        WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
        struct nouveau_gpuobj *gpuobj =
                container_of(ref, struct nouveau_gpuobj, refcount);
        struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int i;

        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

        if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);

        if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
                if (gpuobj->node) {
                        instmem->unmap(gpuobj);
                        instmem->put(gpuobj);
                }
        } else {
                if (gpuobj->node) {
                        spin_lock(&dev_priv->ramin_lock);
                        drm_mm_put_block(gpuobj->node);
                        spin_unlock(&dev_priv->ramin_lock);
                }
        }

        spin_lock(&dev_priv->ramin_lock);
        list_del(&gpuobj->list);
        spin_unlock(&dev_priv->ramin_lock);

        kfree(gpuobj);
}

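/* Reference-counted pointer assignment: takes a reference on @ref (when
 * non-NULL), drops the reference currently held through *@ptr (if any),
 * then stores @ref in *@ptr.  Hence nouveau_gpuobj_ref(NULL, &obj)
 * releases obj, and nouveau_gpuobj_ref(a, &b) safely replaces b's object
 * with a.
 */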
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
        if (ref)
                kref_get(&ref->refcount);

        if (*ptr)
                kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

        *ptr = ref;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
                        u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;

        NV_DEBUG(dev,
                 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
                 pinst, vinst, size, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size  = size;
        gpuobj->pinst = pinst;
        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        gpuobj->vinst = vinst;

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *pgpuobj = gpuobj;
        return 0;
}

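/* Write an NV50-style DMA object into @obj at @offset.  The object is six
 * 32-bit words: flags (class, compression/storage type, access and target
 * encodings built below), the low 32 bits of the limit (base + size - 1),
 * the low 32 bits of the base, one word packing the high bits of both,
 * and two zero words.
 */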
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
                     u64 base, u64 size, int target, int access,
                     u32 type, u32 comp)
{
        struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        u32 flags0;

        flags0  = (comp << 29) | (type << 22) | class;
        flags0 |= 0x00100000;

        switch (access) {
        case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
        case NV_MEM_ACCESS_RW:
        case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
        default:
                break;
        }

        switch (target) {
        case NV_MEM_TARGET_VRAM:
                flags0 |= 0x00010000;
                break;
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        case NV_MEM_TARGET_GART:
                base += dev_priv->gart_info.aper_base;
                /* fall through */
        default:
                flags0 &= ~0x00100000;
                break;
        }

        /* convert to base + limit */
        size = (base + size) - 1;

        nv_wo32(obj, offset + 0x00, flags0);
        nv_wo32(obj, offset + 0x04, lower_32_bits(size));
        nv_wo32(obj, offset + 0x08, lower_32_bits(base));
        nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
                                    upper_32_bits(base));
        nv_wo32(obj, offset + 0x10, 0x00000000);
        nv_wo32(obj, offset + 0x14, 0x00000000);

        pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
                    int target, int access, u32 type, u32 comp,
                    struct nouveau_gpuobj **pobj)
{
        struct drm_device *dev = chan->dev;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
        if (ret)
                return ret;

        nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
                             access, type, comp);
        return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
                       u64 size, int access, int target,
                       struct nouveau_gpuobj **pobj)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *obj;
        u32 flags0, flags2;
        int ret;

        if (dev_priv->card_type >= NV_50) {
                u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
                u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

                return nv50_gpuobj_dma_new(chan, class, base, size,
                                           target, access, type, comp, pobj);
        }

        if (target == NV_MEM_TARGET_GART) {
                struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

                if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
                        if (base == 0) {
                                nouveau_gpuobj_ref(gart, pobj);
                                return 0;
                        }

                        base   = nouveau_sgdma_get_physical(dev, base);
                        target = NV_MEM_TARGET_PCI;
                } else {
                        base += dev_priv->gart_info.aper_base;
                        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
                                target = NV_MEM_TARGET_PCI_NOSNOOP;
                        else
                                target = NV_MEM_TARGET_PCI;
                }
        }

        flags0  = class;
        flags0 |= 0x00003000; /* PT present, PT linear */
        flags2  = 0;

        switch (target) {
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        default:
                break;
        }

        switch (access) {
        case NV_MEM_ACCESS_RO:
                flags0 |= 0x00004000;
                break;
        case NV_MEM_ACCESS_WO:
                flags0 |= 0x00008000;
                /* fall through */
        default:
                flags2 |= 0x00000002;
                break;
        }

        flags0 |= (base & 0x00000fff) << 20;
        flags2 |= (base & 0xfffff000);

        ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
        if (ret)
                return ret;

        nv_wo32(obj, 0x00, flags0);
        nv_wo32(obj, 0x04, size - 1);
        nv_wo32(obj, 0x08, flags2);
        nv_wo32(obj, 0x0c, flags2);

        obj->engine = NVOBJ_ENGINE_SW;
        obj->class  = class;
        *pobj = obj;
        return 0;
}

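/* Instantiate an object of @class on @chan, creating the owning engine's
 * per-channel context first if this is the channel's first object on that
 * engine.
 */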
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj_class *oc;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

        list_for_each_entry(oc, &dev_priv->classes, head) {
                struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

                if (oc->id != class)
                        continue;

                if (!chan->engctx[oc->engine]) {
                        ret = eng->context_new(chan, oc->engine);
                        if (ret)
                                return ret;
                }

                return eng->object_new(chan, oc->engine, handle, class);
        }

        return -EINVAL;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t size;
        uint32_t base;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        /* Base amount for object storage (4KiB enough?) */
        size = 0x2000;
        base = 0;

        if (dev_priv->card_type == NV_50) {
                /* Various fixed table thingos */
                size += 0x1400; /* mostly unknown stuff */
                size += 0x4000; /* vm pd */
                base  = 0x6000;
                /* RAMHT, not sure about setting size yet, 32KiB to be safe */
                size += 0x8000;
                /* RAMFC */
                size += 0x1000;
        }

        ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, base, size - base);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }

        return 0;
}

static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *pgd = NULL;
        struct nouveau_vm_pgd *vpgd;
        int ret;

        ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
        if (ret)
                return ret;

        /* create page directory for this vm if none currently exists,
         * will be destroyed automagically when last reference to the
         * vm is removed
         */
        if (list_empty(&vm->pgd_list)) {
                ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
                if (ret)
                        return ret;
        }
        nouveau_vm_ref(vm, &chan->vm, pgd);
        nouveau_gpuobj_ref(NULL, &pgd);

        /* point channel at vm's page directory */
        vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
        nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
        nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
        nv_wo32(chan->ramin, 0x0208, 0xffffffff);
        nv_wo32(chan->ramin, 0x020c, 0x000000ff);

        return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                            uint32_t vram_h, uint32_t tt_h)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
        struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret;

        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
        if (dev_priv->card_type >= NV_C0)
                return nvc0_gpuobj_channel_init(chan, vm);

        /* Allocate a chunk of memory for per-channel object storage */
        ret = nouveau_gpuobj_channel_init_pramin(chan);
        if (ret) {
                NV_ERROR(dev, "init pramin\n");
                return ret;
        }

        /* NV50 VM
         *  - Allocate per-channel page-directory
         *  - Link with shared channel VM
         */
        if (vm) {
                u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                u32 vm_pinst = chan->ramin->pinst;

                if (vm_pinst != ~0)
                        vm_pinst += pgd_offs;

                ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
                                              0, &chan->vm_pd);
                if (ret)
                        return ret;

                nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
        }

        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
                nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
        } else {
                struct nouveau_gpuobj *ramht = NULL;

                ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;

                ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
                nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;
        }

        /* VRAM ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_ramht_insert(chan, vram_h, vram);
        nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
                NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &tt);
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->gart_info.aper_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_GART, &tt);
        }

        if (ret) {
                NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, tt_h, tt);
        nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
                NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
        NV_DEBUG(chan->dev, "ch%d\n", chan->id);

        nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_pd);

        if (drm_mm_initialized(&chan->ramin_heap))
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
}

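/* Global objects (cinst == NVOBJ_CINST_GLOBAL) live in instance memory
 * whose contents may be lost across suspend, so nouveau_gpuobj_suspend()
 * copies them into a vmalloc()'d shadow and nouveau_gpuobj_resume()
 * writes the shadow back and flushes.
 */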
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
                        continue;

                gpuobj->suspend = vmalloc(gpuobj->size);
                if (!gpuobj->suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }

                for (i = 0; i < gpuobj->size; i += 4)
                        gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
        }

        return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->suspend)
                        continue;

                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

                vfree(gpuobj->suspend);
                gpuobj->suspend = NULL;
        }

        dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_grobj_alloc *init = data;
        struct nouveau_channel *chan;
        int ret;

        if (init->handle == ~0)
                return -EINVAL;

        /* compatibility with userspace that assumes 506e for all chipsets */
        if (init->class == 0x506e) {
                init->class = nouveau_software_class(dev);
                if (init->class == 0x906e)
                        return 0;
        } else
        if (init->class == 0x906e) {
                NV_ERROR(dev, "906e not supported yet\n");
                return -EINVAL;
        }

        chan = nouveau_channel_get(file_priv, init->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        if (nouveau_ramht_find(chan, init->handle)) {
                ret = -EEXIST;
                goto out;
        }

        ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
        if (ret) {
                NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
        }

out:
        nouveau_channel_put(&chan);
        return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_gpuobj_free *objfree = data;
        struct nouveau_channel *chan;
        int ret;

        chan = nouveau_channel_get(file_priv, objfree->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* Synchronize with the user channel */
        nouveau_channel_idle(chan);

        ret = nouveau_ramht_remove(chan, objfree->handle);
        nouveau_channel_put(&chan);
        return ret;
}

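/* Indirect instance-memory accessors.  When an object has no usable
 * PRAMIN BAR mapping (pinst == ~0, or RAMIN isn't available yet), access
 * goes through a banked 64KiB window: register 0x001700 is programmed
 * with bits 16+ of the target address, making that 64KiB of instance
 * memory visible at 0x700000, and vm_lock serialises use of the window.
 */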
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
        unsigned long flags;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32  val;

                spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
                spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return val;
        }

        return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
        unsigned long flags;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;

                spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
                spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return;
        }

        nv_wi32(dev, gpuobj->pinst + offset, val);
}