/* drivers/gpu/drm/nouveau/nouveau_sgdma.c */
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

struct nouveau_sgdma_be {
        /* this has to be the first field so populate/unpopulate in
         * nouveau_bo.c works properly, otherwise we have to move them here
         */
        struct ttm_dma_tt ttm;
        struct drm_device *dev;
        u64 offset;
};

static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        if (ttm) {
                NV_DEBUG(nvbe->dev, "\n");
                ttm_dma_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
}

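/* NV04-style GART: the ctxdma is a flat table of 32-bit PTEs, one per
 * 4KiB GART page.  Entries start at word 2 because words 0 and 1 hold
 * the DMA object header written in nouveau_sgdma_init().  The low bits
 * OR'd into each PTE appear to mark the page present and writable, and
 * only the low 32 bits of the bus address are stored; these chips run
 * with a 32-bit DMA mask so that is sufficient.
 */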
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < ttm->num_pages; i++) {
                dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
                uint32_t offset_l = lower_32_bits(dma_offset);

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                        offset_l += NV_CTXDMA_PAGE_SIZE;
                }
        }

        return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (ttm->state != tt_bound)
                return 0;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < ttm->num_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
        .bind                   = nv04_sgdma_bind,
        .unbind                 = nv04_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

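/* Kick the NV41 VM engine to flush its TLB after page table updates.
 * 0x100810 appears to be an undocumented flush trigger/status register;
 * bit 8 signals completion, inferred from the access pattern below.
 */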
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100810, 0x00000022);
        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100810));
        nv_wr32(dev, 0x100810, 0x00000000);
}

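/* NV41-style GART: one 32-bit PTE per 4KiB page.  The bus address is
 * stored shifted right by 7 (pages are 4KiB-aligned, so the low PTE
 * bits stay clear), with bit 0 as what appears to be the present bit.
 */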
static int
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2;
        u32 cnt = ttm->num_pages;

        nvbe->offset = mem->start << PAGE_SHIFT;

        while (cnt--) {
                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}

static int
nv41_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = ttm->num_pages;

        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
        .bind                   = nv41_sgdma_bind,
        .unbind                 = nv41_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

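/* NV44 GART flush: 0x100814 seems to take the size of the region being
 * flushed and 0x100808 the base (OR'd with 0x20 to trigger); bit 0 of
 * 0x100808 then signals completion.  These registers are not publicly
 * documented, so the meanings are inferred from the access pattern.
 */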
static void
nv44_sgdma_flush(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100808));
        nv_wr32(dev, 0x100808, 0x00000000);
}

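/* NV44 packs four 27-bit page frame numbers into each 16-byte (four
 * word) page table group, with bit 30 of the last word always set
 * (apparently a valid bit for the group), so individual PTEs cannot be
 * written in isolation.  This helper read-modify-writes one group:
 * "base" is the byte offset of the first PTE to touch, "cnt" how many
 * consecutive PTEs to update, and "list" the new bus addresses; a NULL
 * list points the slots at the dummy scratch page instead, since NV44
 * has no usable present bit to mark entries invalid.
 */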
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
        u32 pte, tmp[4];

        pte   = base >> 2;
        base &= ~0x0000000f;

        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);
        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        tmp[3] |= 0x40000000;

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3]);
}

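/* Bind for NV44: a misaligned head and tail are handled through the
 * read-modify-write helper above, while fully-aligned groups of four
 * PTEs are packed and written directly.
 */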
static int
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = ttm->num_pages;
        int i;

        nvbe->offset = mem->start << PAGE_SHIFT;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, list, pte, part);
                pte  += (part << 2);
                list += part;
                cnt  -= part;
        }

        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
                nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
                pte  += 0x10;
                cnt  -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);

        nv44_sgdma_flush(ttm);
        return 0;
}

static int
nv44_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = ttm->num_pages;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, NULL, pte, part);
                pte  += (part << 2);
                cnt  -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte + 0x0, 0x00000000);
                nv_wo32(pgt, pte + 0x4, 0x00000000);
                nv_wo32(pgt, pte + 0x8, 0x00000000);
                nv_wo32(pgt, pte + 0xc, 0x00000000);
                pte  += 0x10;
                cnt  -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);

        nv44_sgdma_flush(ttm);
        return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
        .bind                   = nv44_sgdma_bind,
        .unbind                 = nv44_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

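/* On NV50 and later the real page table updates happen through the VM
 * code from move_notify(); bind only records where the page/sg lists
 * live so that code can find them, and unbind has nothing to do.
 */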
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        if (ttm->sg)
                node->sg = ttm->sg;
        else
                node->pages = nvbe->ttm.dma_address;
        return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
        /* noop: unbound in move_notify() */
        return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
        .bind                   = nv50_sgdma_bind,
        .unbind                 = nv50_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

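/* Allocate a TTM backend object for this device: a ttm_dma_tt wrapped
 * in our nouveau_sgdma_be, using the per-generation function table
 * selected by nouveau_sgdma_init() below.
 */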
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
                         unsigned long size, uint32_t page_flags,
                         struct page *dummy_read_page)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;
        nvbe->ttm.ttm.func = dev_priv->gart_info.func;

        if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(nvbe);
                return NULL;
        }
        return &nvbe->ttm.ttm;
}

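/* Pick a GART backend for the chipset, allocate the dummy scratch page
 * that unbound NV44 PTEs must point at, and create the page table or
 * ctxdma object the chosen backend expects.
 */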
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        u32 aper_size, align;
        int ret;

        if (dev_priv->card_type >= NV_40)
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 128 * 1024 * 1024;

        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
         * christmas.  The cards before it have them, the cards after
         * it have them, why is NV44 so unloved?
         */
        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
        if (!dev_priv->gart_info.dummy.page)
                return -ENOMEM;

        dev_priv->gart_info.dummy.addr =
                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
                NV_ERROR(dev, "error mapping dummy page\n");
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
                return -ENOMEM;
        }

        if (dev_priv->card_type >= NV_50) {
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (0 && pci_is_pcie(dev->pdev) &&
            dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
                /* deliberately disabled ("0 &&"): the NV41/NV44 PCIE GART
                 * backends are kept around, but PCIE boards currently fall
                 * through to the NV04-style path below */
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
                } else {
                        dev_priv->gart_info.func = &nv41_sgdma_backend;
                        align = 16;
                }

                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
        } else {
                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
                dev_priv->gart_info.func = &nv04_sgdma_backend;
        }

        return 0;
}

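/* Drop our reference on the GART ctxdma (freeing it once unused) and
 * release the dummy scratch page.
 */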
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

        if (dev_priv->gart_info.dummy.page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
        }
}

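/* Translate a GART offset into the bus address currently mapped there
 * by reading the PTE back out of the NV04-style ctxdma (the "+ 2" skips
 * the two-word object header written in nouveau_sgdma_init()).  Only
 * valid for the pre-NV50 PDMA GART, hence the BUG_ON.
 */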
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}
447 }