drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/device.h>
#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/ltcg.h>
#include <subdev/bar.h>

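/* Per-instance state for the GF100 (NVC0) VM manager; it only wraps the
 * common nouveau_vmmgr base object.
 */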
struct nvc0_vmmgr_priv {
        struct nouveau_vmmgr base;
};


/* Map from compressed to corresponding uncompressed storage type.
 * The value 0xff represents an invalid storage type.
 */
const u8 nvc0_pte_storage_type_map[256] =
{
        0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
        0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
        0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
        0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
        0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
        0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
        0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
        0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
        0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
        0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
        0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
        0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
        0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
        0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
        0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
        0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
        0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};

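/* Write the page directory entry at "index".  Each PDE is two 32-bit
 * words: the first appears to point at the large-page table (pgt[1]),
 * the second at the small-page table (pgt[0]), each as addr >> 8 with
 * bit 0 presumably marking the table present.
 */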
static void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
                struct nouveau_gpuobj *pgt[2])
{
        u32 pde[2] = { 0, 0 };

        if (pgt[0])
                pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
        if (pgt[1])
                pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

        nv_wo32(pgd, (index * 8) + 0, pde[0]);
        nv_wo32(pgd, (index * 8) + 4, pde[1]);
}

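/* Build the common part of a PTE: the physical address shifted into
 * place, bit 0 set for "present", bit 1 set for NV_MEM_ACCESS_SYS
 * mappings, the target aperture from bit 32 and the storage (kind)
 * type from bit 36.
 */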
static inline u64
nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
        phys >>= 8;

        phys |= 0x00000001; /* present */
        if (vma->access & NV_MEM_ACCESS_SYS)
                phys |= 0x00000002;

        phys |= ((u64)target  << 32);
        phys |= ((u64)memtype << 36);

        return phys;
}

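/* Map "cnt" contiguous pages of VRAM (target 0) starting at "phys".
 * The per-PTE increment is derived from the node's page size; if the
 * memory has a compression tag allocation, the tag index is encoded
 * into the high PTE bits, advances along with the pages, and the tags
 * are cleared through the LTCG subdev before use.
 */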
static void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
            struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
        u64 next = 1 << (vma->node->type - 8);

        phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
        pte <<= 3;

        if (mem->tag) {
                struct nouveau_ltcg *ltcg =
                        nouveau_ltcg(vma->vm->vmm->base.base.parent);
                u32 tag = mem->tag->offset + (delta >> 17);
                phys |= (u64)tag << (32 + 12);
                next |= (u64)1   << (32 + 12);
                ltcg->tags_clear(ltcg, tag, cnt);
        }

        while (cnt--) {
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                phys += next;
                pte  += 8;
        }
}

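/* Map "cnt" pages from a list of DMA addresses (system memory).  The
 * target is 7 for no-snoop mappings and 5 otherwise, and the storage
 * type is run through the map above because compressed types cannot
 * be used for system memory.
 */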
static void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
        /* compressed storage types are invalid for system memory */
        u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];

        pte <<= 3;
        while (cnt--) {
                u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                pte += 8;
        }
}

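/* Clear "cnt" page table entries starting at "pte". */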
static void
nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
        pte <<= 3;
        while (cnt--) {
                nv_wo32(pgt, pte + 0, 0x00000000);
                nv_wo32(pgt, pte + 4, 0x00000000);
                pte += 8;
        }
}

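/* Flush the VM TLBs for every page directory attached to this VM by
 * writing the directory address and flush type to 0x100cb8/0x100cbc,
 * serialized against other users of the flush registers by the subdev
 * mutex.  BAR writes are flushed first so PTE updates have landed.
 */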
static void
nvc0_vm_flush(struct nouveau_vm *vm)
{
        struct nvc0_vmmgr_priv *priv = (void *)vm->vmm;
        struct nouveau_bar *bar = nouveau_bar(priv);
        struct nouveau_vm_pgd *vpgd;
        u32 type;

        bar->flush(bar);

        type = 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */

        mutex_lock(&nv_subdev(priv)->mutex);
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                /* looks like maybe a "free flush slots" counter, the
                 * faster you write to 0x100cbc the more it decreases
                 */
                if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
                        nv_error(priv, "vm timeout 0: 0x%08x %d\n",
                                 nv_rd32(priv, 0x100c80), type);
                }

                nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
                nv_wr32(priv, 0x100cbc, 0x80000000 | type);

                /* wait for flush to be queued? */
                if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
                        nv_error(priv, "vm timeout 1: 0x%08x %d\n",
                                 nv_rd32(priv, 0x100c80), type);
                }
        }
        mutex_unlock(&nv_subdev(priv)->mutex);
}

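/* Create an address space; 4096 is the block size handed to the
 * generic nouveau_vm_create() helper.
 */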
static int
nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
               u64 mm_offset, struct nouveau_vm **pvm)
{
        return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
}

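/* Constructor: fill in the GF100 address-space parameters (40-bit VA,
 * 4KiB small and 128KiB large pages, 128MiB of VA covered per page
 * table) and hook up the methods used by the generic VM code.
 */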
static int
nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                struct nouveau_oclass *oclass, void *data, u32 size,
                struct nouveau_object **pobject)
{
        struct nvc0_vmmgr_priv *priv;
        int ret;

        ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        priv->base.limit = 1ULL << 40;
        priv->base.dma_bits = 40;
        priv->base.pgt_bits  = 27 - 12;
        priv->base.spg_shift = 12;
        priv->base.lpg_shift = 17;
        priv->base.create = nvc0_vm_create;
        priv->base.map_pgt = nvc0_vm_map_pgt;
        priv->base.map = nvc0_vm_map;
        priv->base.map_sg = nvc0_vm_map_sg;
        priv->base.unmap = nvc0_vm_unmap;
        priv->base.flush = nvc0_vm_flush;
        return 0;
}

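/* Object class exposed to the core so the GF100 VM manager can be
 * instantiated as the VM subdev.
 */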
struct nouveau_oclass
nvc0_vmmgr_oclass = {
        .handle = NV_SUBDEV(VM, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_vmmgr_ctor,
                .dtor = _nouveau_vmmgr_dtor,
                .init = _nouveau_vmmgr_init,
                .fini = _nouveau_vmmgr_fini,
        },
};