/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_cayman.h"

/*
 * Indirect registers accessor
 */
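/**
 * tn_smc_rreg - read an SMC indirect register (TN)
 *
 * @rdev: radeon_device pointer
 * @reg: SMC register offset
 *
 * Reads an SMC register through the TN_SMC_IND index/data pair
 * while holding smc_idx_lock, and returns its value.
 */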
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&rdev->smc_idx_lock, flags);
        WREG32(TN_SMC_IND_INDEX_0, (reg));
        r = RREG32(TN_SMC_IND_DATA_0);
        spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
        return r;
}

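/**
 * tn_smc_wreg - write an SMC indirect register (TN)
 *
 * @rdev: radeon_device pointer
 * @reg: SMC register offset
 * @v: value to write
 *
 * Writes @v to an SMC register through the TN_SMC_IND index/data pair
 * while holding smc_idx_lock.
 */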
void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&rdev->smc_idx_lock, flags);
        WREG32(TN_SMC_IND_INDEX_0, (reg));
        WREG32(TN_SMC_IND_DATA_0, (v));
        spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}

static const u32 tn_rlc_save_restore_register_list[] =
{
        0x98fc,
        0x98f0,
        0x9834,
        0x9838,
        0x9870,
        0x9874,
        0x8a14,
        0x8b24,
        0x8bcc,
        0x8b10,
        0x8c30,
        0x8d00,
        0x8d04,
        0x8c00,
        0x8c04,
        0x8c10,
        0x8c14,
        0x8d8c,
        0x8cf0,
        0x8e38,
        0x9508,
        0x9688,
        0x9608,
        0x960c,
        0x9610,
        0x9614,
        0x88c4,
        0x8978,
        0x88d4,
        0x900c,
        0x9100,
        0x913c,
        0x90e8,
        0x9354,
        0xa008,
        0x98f8,
        0x9148,
        0x914c,
        0x3f94,
        0x98f4,
        0x9b7c,
        0x3f8c,
        0x8950,
        0x8954,
        0x8a18,
        0x8b28,
        0x9144,
        0x3f90,
        0x915c,
        0x9160,
        0x9178,
        0x917c,
        0x9180,
        0x918c,
        0x9190,
        0x9194,
        0x9198,
        0x919c,
        0x91a8,
        0x91ac,
        0x91b0,
        0x91b4,
        0x91b8,
        0x91c4,
        0x91c8,
        0x91cc,
        0x91d0,
        0x91d4,
        0x91e0,
        0x91e4,
        0x91ec,
        0x91f0,
        0x91f4,
        0x9200,
        0x9204,
        0x929c,
        0x8030,
        0x9150,
        0x9a60,
        0x920c,
        0x9210,
        0x9228,
        0x922c,
        0x9244,
        0x9248,
        0x91e8,
        0x9294,
        0x9208,
        0x9224,
        0x9240,
        0x9220,
        0x923c,
        0x9258,
        0x9744,
        0xa200,
        0xa204,
        0xa208,
        0xa20c,
        0x8d58,
        0x9030,
        0x9034,
        0x9038,
        0x903c,
        0x9040,
        0x9654,
        0x897c,
        0xa210,
        0xa214,
        0x9868,
        0xa02c,
        0x9664,
        0x9698,
        0x949c,
        0x8e10,
        0x8e18,
        0x8c50,
        0x8c58,
        0x8c60,
        0x8c68,
        0x89b4,
        0x9830,
        0x802c,
};

extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");


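/*
 * Golden register tables: each entry is an {offset, and-mask, or-value}
 * triple consumed by radeon_program_register_sequence() from
 * ni_init_golden_registers() below.
 */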
static const u32 cayman_golden_registers2[] =
{
        0x3e5c, 0xffffffff, 0x00000000,
        0x3e48, 0xffffffff, 0x00000000,
        0x3e4c, 0xffffffff, 0x00000000,
        0x3e64, 0xffffffff, 0x00000000,
        0x3e50, 0xffffffff, 0x00000000,
        0x3e60, 0xffffffff, 0x00000000
};

static const u32 cayman_golden_registers[] =
{
        0x5eb4, 0xffffffff, 0x00000002,
        0x5e78, 0x8f311ff1, 0x001000f0,
        0x3f90, 0xffff0000, 0xff000000,
        0x9148, 0xffff0000, 0xff000000,
        0x3f94, 0xffff0000, 0xff000000,
        0x914c, 0xffff0000, 0xff000000,
        0xc78, 0x00000080, 0x00000080,
        0xbd4, 0x70073777, 0x00011003,
        0xd02c, 0xbfffff1f, 0x08421000,
        0xd0b8, 0x73773777, 0x02011003,
        0x5bc0, 0x00200000, 0x50100000,
        0x98f8, 0x33773777, 0x02011003,
        0x98fc, 0xffffffff, 0x76541032,
        0x7030, 0x31000311, 0x00000011,
        0x2f48, 0x33773777, 0x42010001,
        0x6b28, 0x00000010, 0x00000012,
        0x7728, 0x00000010, 0x00000012,
        0x10328, 0x00000010, 0x00000012,
        0x10f28, 0x00000010, 0x00000012,
        0x11b28, 0x00000010, 0x00000012,
        0x12728, 0x00000010, 0x00000012,
        0x240c, 0x000007ff, 0x00000000,
        0x8a14, 0xf000001f, 0x00000007,
        0x8b24, 0x3fff3fff, 0x00ff0fff,
        0x8b10, 0x0000ff0f, 0x00000000,
        0x28a4c, 0x07ffffff, 0x06000000,
        0x10c, 0x00000001, 0x00010003,
        0xa02c, 0xffffffff, 0x0000009b,
        0x913c, 0x0000010f, 0x01000100,
        0x8c04, 0xf8ff00ff, 0x40600060,
        0x28350, 0x00000f01, 0x00000000,
        0x9508, 0x3700001f, 0x00000002,
        0x960c, 0xffffffff, 0x54763210,
        0x88c4, 0x001f3ae3, 0x00000082,
        0x88d0, 0xffffffff, 0x0f40df40,
        0x88d4, 0x0000001f, 0x00000010,
        0x8974, 0xffffffff, 0x00000000
};

static const u32 dvst_golden_registers2[] =
{
        0x8f8, 0xffffffff, 0,
        0x8fc, 0x00380000, 0,
        0x8f8, 0xffffffff, 1,
        0x8fc, 0x0e000000, 0
};

static const u32 dvst_golden_registers[] =
{
        0x690, 0x3fff3fff, 0x20c00033,
        0x918c, 0x0fff0fff, 0x00010006,
        0x91a8, 0x0fff0fff, 0x00010006,
        0x9150, 0xffffdfff, 0x6e944040,
        0x917c, 0x0fff0fff, 0x00030002,
        0x9198, 0x0fff0fff, 0x00030002,
        0x915c, 0x0fff0fff, 0x00010000,
        0x3f90, 0xffff0001, 0xff000000,
        0x9178, 0x0fff0fff, 0x00070000,
        0x9194, 0x0fff0fff, 0x00070000,
        0x9148, 0xffff0001, 0xff000000,
        0x9190, 0x0fff0fff, 0x00090008,
        0x91ac, 0x0fff0fff, 0x00090008,
        0x3f94, 0xffff0000, 0xff000000,
        0x914c, 0xffff0000, 0xff000000,
        0x929c, 0x00000fff, 0x00000001,
        0x55e4, 0xff607fff, 0xfc000100,
        0x8a18, 0xff000fff, 0x00000100,
        0x8b28, 0xff000fff, 0x00000100,
        0x9144, 0xfffc0fff, 0x00000100,
        0x6ed8, 0x00010101, 0x00010000,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
        0x9838, 0xfffffffe, 0x00000000,
        0xd0c0, 0xff000fff, 0x00000100,
        0xd02c, 0xbfffff1f, 0x08421000,
        0xd0b8, 0x73773777, 0x12010001,
        0x5bb0, 0x000000f0, 0x00000070,
        0x98f8, 0x73773777, 0x12010001,
        0x98fc, 0xffffffff, 0x00000010,
        0x9b7c, 0x00ff0000, 0x00fc0000,
        0x8030, 0x00001f0f, 0x0000100a,
        0x2f48, 0x73773777, 0x12010001,
        0x2408, 0x00030000, 0x000c007f,
        0x8a14, 0xf000003f, 0x00000007,
        0x8b24, 0x3fff3fff, 0x00ff0fff,
        0x8b10, 0x0000ff0f, 0x00000000,
        0x28a4c, 0x07ffffff, 0x06000000,
        0x4d8, 0x00000fff, 0x00000100,
        0xa008, 0xffffffff, 0x00010000,
        0x913c, 0xffff03ff, 0x01000100,
        0x8c00, 0x000000ff, 0x00000003,
        0x8c04, 0xf8ff00ff, 0x40600060,
        0x8cf0, 0x1fff1fff, 0x08e00410,
        0x28350, 0x00000f01, 0x00000000,
        0x9508, 0xf700071f, 0x00000002,
        0x960c, 0xffffffff, 0x54763210,
        0x20ef8, 0x01ff01ff, 0x00000002,
        0x20e98, 0xfffffbff, 0x00200000,
        0x2015c, 0xffffffff, 0x00000f40,
        0x88c4, 0x001f3ae3, 0x00000082,
        0x8978, 0x3fffffff, 0x04050140,
        0x88d4, 0x0000001f, 0x00000010,
        0x8974, 0xffffffff, 0x00000000
};

static const u32 scrapper_golden_registers[] =
{
        0x690, 0x3fff3fff, 0x20c00033,
        0x918c, 0x0fff0fff, 0x00010006,
        0x918c, 0x0fff0fff, 0x00010006,
        0x91a8, 0x0fff0fff, 0x00010006,
        0x91a8, 0x0fff0fff, 0x00010006,
        0x9150, 0xffffdfff, 0x6e944040,
        0x9150, 0xffffdfff, 0x6e944040,
        0x917c, 0x0fff0fff, 0x00030002,
        0x917c, 0x0fff0fff, 0x00030002,
        0x9198, 0x0fff0fff, 0x00030002,
        0x9198, 0x0fff0fff, 0x00030002,
        0x915c, 0x0fff0fff, 0x00010000,
        0x915c, 0x0fff0fff, 0x00010000,
        0x3f90, 0xffff0001, 0xff000000,
        0x3f90, 0xffff0001, 0xff000000,
        0x9178, 0x0fff0fff, 0x00070000,
        0x9178, 0x0fff0fff, 0x00070000,
        0x9194, 0x0fff0fff, 0x00070000,
        0x9194, 0x0fff0fff, 0x00070000,
        0x9148, 0xffff0001, 0xff000000,
        0x9148, 0xffff0001, 0xff000000,
        0x9190, 0x0fff0fff, 0x00090008,
        0x9190, 0x0fff0fff, 0x00090008,
        0x91ac, 0x0fff0fff, 0x00090008,
        0x91ac, 0x0fff0fff, 0x00090008,
        0x3f94, 0xffff0000, 0xff000000,
        0x3f94, 0xffff0000, 0xff000000,
        0x914c, 0xffff0000, 0xff000000,
        0x914c, 0xffff0000, 0xff000000,
        0x929c, 0x00000fff, 0x00000001,
        0x929c, 0x00000fff, 0x00000001,
        0x55e4, 0xff607fff, 0xfc000100,
        0x8a18, 0xff000fff, 0x00000100,
        0x8a18, 0xff000fff, 0x00000100,
        0x8b28, 0xff000fff, 0x00000100,
        0x8b28, 0xff000fff, 0x00000100,
        0x9144, 0xfffc0fff, 0x00000100,
        0x9144, 0xfffc0fff, 0x00000100,
        0x6ed8, 0x00010101, 0x00010000,
        0x9830, 0xffffffff, 0x00000000,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
        0x9834, 0xf00fffff, 0x00000400,
        0x9838, 0xfffffffe, 0x00000000,
        0x9838, 0xfffffffe, 0x00000000,
        0xd0c0, 0xff000fff, 0x00000100,
        0xd02c, 0xbfffff1f, 0x08421000,
        0xd02c, 0xbfffff1f, 0x08421000,
        0xd0b8, 0x73773777, 0x12010001,
        0xd0b8, 0x73773777, 0x12010001,
        0x5bb0, 0x000000f0, 0x00000070,
        0x98f8, 0x73773777, 0x12010001,
        0x98f8, 0x73773777, 0x12010001,
        0x98fc, 0xffffffff, 0x00000010,
        0x98fc, 0xffffffff, 0x00000010,
        0x9b7c, 0x00ff0000, 0x00fc0000,
        0x9b7c, 0x00ff0000, 0x00fc0000,
        0x8030, 0x00001f0f, 0x0000100a,
        0x8030, 0x00001f0f, 0x0000100a,
        0x2f48, 0x73773777, 0x12010001,
        0x2f48, 0x73773777, 0x12010001,
        0x2408, 0x00030000, 0x000c007f,
        0x8a14, 0xf000003f, 0x00000007,
        0x8a14, 0xf000003f, 0x00000007,
        0x8b24, 0x3fff3fff, 0x00ff0fff,
        0x8b24, 0x3fff3fff, 0x00ff0fff,
        0x8b10, 0x0000ff0f, 0x00000000,
        0x8b10, 0x0000ff0f, 0x00000000,
        0x28a4c, 0x07ffffff, 0x06000000,
        0x28a4c, 0x07ffffff, 0x06000000,
        0x4d8, 0x00000fff, 0x00000100,
        0x4d8, 0x00000fff, 0x00000100,
        0xa008, 0xffffffff, 0x00010000,
        0xa008, 0xffffffff, 0x00010000,
        0x913c, 0xffff03ff, 0x01000100,
        0x913c, 0xffff03ff, 0x01000100,
        0x90e8, 0x001fffff, 0x010400c0,
        0x8c00, 0x000000ff, 0x00000003,
        0x8c00, 0x000000ff, 0x00000003,
        0x8c04, 0xf8ff00ff, 0x40600060,
        0x8c04, 0xf8ff00ff, 0x40600060,
        0x8c30, 0x0000000f, 0x00040005,
        0x8cf0, 0x1fff1fff, 0x08e00410,
        0x8cf0, 0x1fff1fff, 0x08e00410,
        0x900c, 0x00ffffff, 0x0017071f,
        0x28350, 0x00000f01, 0x00000000,
        0x28350, 0x00000f01, 0x00000000,
        0x9508, 0xf700071f, 0x00000002,
        0x9508, 0xf700071f, 0x00000002,
        0x9688, 0x00300000, 0x0017000f,
        0x960c, 0xffffffff, 0x54763210,
        0x960c, 0xffffffff, 0x54763210,
        0x20ef8, 0x01ff01ff, 0x00000002,
        0x20e98, 0xfffffbff, 0x00200000,
        0x2015c, 0xffffffff, 0x00000f40,
        0x88c4, 0x001f3ae3, 0x00000082,
        0x88c4, 0x001f3ae3, 0x00000082,
        0x8978, 0x3fffffff, 0x04050140,
        0x8978, 0x3fffffff, 0x04050140,
        0x88d4, 0x0000001f, 0x00000010,
        0x88d4, 0x0000001f, 0x00000010,
        0x8974, 0xffffffff, 0x00000000,
        0x8974, 0xffffffff, 0x00000000
};

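/**
 * ni_init_golden_registers - program golden register settings
 *
 * @rdev: radeon_device pointer
 *
 * Programs the per-asic golden register tables above: the Cayman tables
 * on CHIP_CAYMAN, and one of the two ARUBA variants selected by PCI
 * device id.
 */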
static void ni_init_golden_registers(struct radeon_device *rdev)
{
        switch (rdev->family) {
        case CHIP_CAYMAN:
                radeon_program_register_sequence(rdev,
                                                 cayman_golden_registers,
                                                 (const u32)ARRAY_SIZE(cayman_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 cayman_golden_registers2,
                                                 (const u32)ARRAY_SIZE(cayman_golden_registers2));
                break;
        case CHIP_ARUBA:
                if ((rdev->pdev->device == 0x9900) ||
                    (rdev->pdev->device == 0x9901) ||
                    (rdev->pdev->device == 0x9903) ||
                    (rdev->pdev->device == 0x9904) ||
                    (rdev->pdev->device == 0x9905) ||
                    (rdev->pdev->device == 0x9906) ||
                    (rdev->pdev->device == 0x9907) ||
                    (rdev->pdev->device == 0x9908) ||
                    (rdev->pdev->device == 0x9909) ||
                    (rdev->pdev->device == 0x990A) ||
                    (rdev->pdev->device == 0x990B) ||
                    (rdev->pdev->device == 0x990C) ||
                    (rdev->pdev->device == 0x990D) ||
                    (rdev->pdev->device == 0x990E) ||
                    (rdev->pdev->device == 0x990F) ||
                    (rdev->pdev->device == 0x9910) ||
                    (rdev->pdev->device == 0x9913) ||
                    (rdev->pdev->device == 0x9917) ||
                    (rdev->pdev->device == 0x9918)) {
                        radeon_program_register_sequence(rdev,
                                                         dvst_golden_registers,
                                                         (const u32)ARRAY_SIZE(dvst_golden_registers));
                        radeon_program_register_sequence(rdev,
                                                         dvst_golden_registers2,
                                                         (const u32)ARRAY_SIZE(dvst_golden_registers2));
                } else {
                        radeon_program_register_sequence(rdev,
                                                         scrapper_golden_registers,
                                                         (const u32)ARRAY_SIZE(scrapper_golden_registers));
                        radeon_program_register_sequence(rdev,
                                                         dvst_golden_registers2,
                                                         (const u32)ARRAY_SIZE(dvst_golden_registers2));
                }
                break;
        default:
                break;
        }
}

#define BTC_IO_MC_REGS_SIZE 29

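/*
 * Per-asic MC io register tables: each entry is a
 * {MC_SEQ_IO_DEBUG index, data} pair written before the MC ucode
 * is loaded in ni_mc_load_microcode().
 */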
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
        {0x00000077, 0xff010100},
        {0x00000078, 0x00000000},
        {0x00000079, 0x00001434},
        {0x0000007a, 0xcc08ec08},
        {0x0000007b, 0x00040000},
        {0x0000007c, 0x000080c0},
        {0x0000007d, 0x09000000},
        {0x0000007e, 0x00210404},
        {0x00000081, 0x08a8e800},
        {0x00000082, 0x00030444},
        {0x00000083, 0x00000000},
        {0x00000085, 0x00000001},
        {0x00000086, 0x00000002},
        {0x00000087, 0x48490000},
        {0x00000088, 0x20244647},
        {0x00000089, 0x00000005},
        {0x0000008b, 0x66030000},
        {0x0000008c, 0x00006603},
        {0x0000008d, 0x00000100},
        {0x0000008f, 0x00001c0a},
        {0x00000090, 0xff000001},
        {0x00000094, 0x00101101},
        {0x00000095, 0x00000fff},
        {0x00000096, 0x00116fff},
        {0x00000097, 0x60010000},
        {0x00000098, 0x10010000},
        {0x00000099, 0x00006000},
        {0x0000009a, 0x00001000},
        {0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
        {0x00000077, 0xff010100},
        {0x00000078, 0x00000000},
        {0x00000079, 0x00001434},
        {0x0000007a, 0xcc08ec08},
        {0x0000007b, 0x00040000},
        {0x0000007c, 0x000080c0},
        {0x0000007d, 0x09000000},
        {0x0000007e, 0x00210404},
        {0x00000081, 0x08a8e800},
        {0x00000082, 0x00030444},
        {0x00000083, 0x00000000},
        {0x00000085, 0x00000001},
        {0x00000086, 0x00000002},
        {0x00000087, 0x48490000},
        {0x00000088, 0x20244647},
        {0x00000089, 0x00000005},
        {0x0000008b, 0x66030000},
        {0x0000008c, 0x00006603},
        {0x0000008d, 0x00000100},
        {0x0000008f, 0x00001c0a},
        {0x00000090, 0xff000001},
        {0x00000094, 0x00101101},
        {0x00000095, 0x00000fff},
        {0x00000096, 0x00116fff},
        {0x00000097, 0x60010000},
        {0x00000098, 0x10010000},
        {0x00000099, 0x00006000},
        {0x0000009a, 0x00001000},
        {0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
        {0x00000077, 0xff010100},
        {0x00000078, 0x00000000},
        {0x00000079, 0x00001434},
        {0x0000007a, 0xcc08ec08},
        {0x0000007b, 0x00040000},
        {0x0000007c, 0x000080c0},
        {0x0000007d, 0x09000000},
        {0x0000007e, 0x00210404},
        {0x00000081, 0x08a8e800},
        {0x00000082, 0x00030444},
        {0x00000083, 0x00000000},
        {0x00000085, 0x00000001},
        {0x00000086, 0x00000002},
        {0x00000087, 0x48490000},
        {0x00000088, 0x20244647},
        {0x00000089, 0x00000005},
        {0x0000008b, 0x66030000},
        {0x0000008c, 0x00006603},
        {0x0000008d, 0x00000100},
        {0x0000008f, 0x00001c0a},
        {0x00000090, 0xff000001},
        {0x00000094, 0x00101101},
        {0x00000095, 0x00000fff},
        {0x00000096, 0x00116fff},
        {0x00000097, 0x60010000},
        {0x00000098, 0x10010000},
        {0x00000099, 0x00006000},
        {0x0000009a, 0x00001000},
        {0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
        {0x00000077, 0xff010100},
        {0x00000078, 0x00000000},
        {0x00000079, 0x00001434},
        {0x0000007a, 0xcc08ec08},
        {0x0000007b, 0x00040000},
        {0x0000007c, 0x000080c0},
        {0x0000007d, 0x09000000},
        {0x0000007e, 0x00210404},
        {0x00000081, 0x08a8e800},
        {0x00000082, 0x00030444},
        {0x00000083, 0x00000000},
        {0x00000085, 0x00000001},
        {0x00000086, 0x00000002},
        {0x00000087, 0x48490000},
        {0x00000088, 0x20244647},
        {0x00000089, 0x00000005},
        {0x0000008b, 0x66030000},
        {0x0000008c, 0x00006603},
        {0x0000008d, 0x00000100},
        {0x0000008f, 0x00001c0a},
        {0x00000090, 0xff000001},
        {0x00000094, 0x00101101},
        {0x00000095, 0x00000fff},
        {0x00000096, 0x00116fff},
        {0x00000097, 0x60010000},
        {0x00000098, 0x10010000},
        {0x00000099, 0x00006000},
        {0x0000009a, 0x00001000},
        {0x0000009f, 0x00976b00}
};

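/**
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Programs the per-asic MC io register table and loads the MC ucode
 * into the memory controller (BTC/Cayman; the TN/ARUBA APUs have no
 * MC firmware).
 * Returns 0 on success or -EINVAL if no MC firmware has been loaded.
 */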
int ni_mc_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        u32 mem_type, running, blackout = 0;
        u32 *io_mc_regs;
        int i, ucode_size, regs_size;

        if (!rdev->mc_fw)
                return -EINVAL;

        switch (rdev->family) {
        case CHIP_BARTS:
                io_mc_regs = (u32 *)&barts_io_mc_regs;
                ucode_size = BTC_MC_UCODE_SIZE;
                regs_size = BTC_IO_MC_REGS_SIZE;
                break;
        case CHIP_TURKS:
                io_mc_regs = (u32 *)&turks_io_mc_regs;
                ucode_size = BTC_MC_UCODE_SIZE;
                regs_size = BTC_IO_MC_REGS_SIZE;
                break;
        case CHIP_CAICOS:
        default:
                io_mc_regs = (u32 *)&caicos_io_mc_regs;
                ucode_size = BTC_MC_UCODE_SIZE;
                regs_size = BTC_IO_MC_REGS_SIZE;
                break;
        case CHIP_CAYMAN:
                io_mc_regs = (u32 *)&cayman_io_mc_regs;
                ucode_size = CAYMAN_MC_UCODE_SIZE;
                regs_size = BTC_IO_MC_REGS_SIZE;
                break;
        }

        mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
        running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

        if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
                if (running) {
                        blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
                        WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
                }

                /* reset the engine and set to writable */
                WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

                /* load mc io regs */
                for (i = 0; i < regs_size; i++) {
                        WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
                        WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
                }
                /* load the MC ucode */
                fw_data = (const __be32 *)rdev->mc_fw->data;
                for (i = 0; i < ucode_size; i++)
                        WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

                /* put the engine back into the active state */
                WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
                WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

                /* wait for training to complete */
                for (i = 0; i < rdev->usec_timeout; i++) {
                        if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
                                break;
                        udelay(1);
                }

                if (running)
                        WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
        }

        return 0;
}

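/**
 * ni_init_microcode - load the required firmware images
 *
 * @rdev: radeon_device pointer
 *
 * Requests the PFP, ME, RLC, MC and (on BTC/Cayman) SMC firmware images
 * from userspace and validates their sizes.  A missing SMC image is not
 * fatal; any other failure releases everything that was loaded.
 * Returns 0 on success or a negative error code on failure.
 */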
int ni_init_microcode(struct radeon_device *rdev)
{
        const char *chip_name;
        const char *rlc_chip_name;
        size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
        size_t smc_req_size = 0;
        char fw_name[30];
        int err;

        DRM_DEBUG("\n");

        switch (rdev->family) {
        case CHIP_BARTS:
                chip_name = "BARTS";
                rlc_chip_name = "BTC";
                pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
                me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
                rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
                mc_req_size = BTC_MC_UCODE_SIZE * 4;
                smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
                break;
        case CHIP_TURKS:
                chip_name = "TURKS";
                rlc_chip_name = "BTC";
                pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
                me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
                rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
                mc_req_size = BTC_MC_UCODE_SIZE * 4;
                smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
                break;
        case CHIP_CAICOS:
                chip_name = "CAICOS";
                rlc_chip_name = "BTC";
                pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
                me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
                rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
                mc_req_size = BTC_MC_UCODE_SIZE * 4;
                smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
                break;
        case CHIP_CAYMAN:
                chip_name = "CAYMAN";
                rlc_chip_name = "CAYMAN";
                pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
                me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
                rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
                mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
                smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
                break;
        case CHIP_ARUBA:
                chip_name = "ARUBA";
                rlc_chip_name = "ARUBA";
                /* pfp/me same size as CAYMAN */
                pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
                me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
                rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
                mc_req_size = 0;
                break;
        default: BUG();
        }

        DRM_INFO("Loading %s Microcode\n", chip_name);

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
        err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->pfp_fw->size != pfp_req_size) {
                printk(KERN_ERR
                       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
                       rdev->pfp_fw->size, fw_name);
                err = -EINVAL;
                goto out;
        }

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
        err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->me_fw->size != me_req_size) {
                printk(KERN_ERR
                       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
                       rdev->me_fw->size, fw_name);
                err = -EINVAL;
        }

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
        err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->rlc_fw->size != rlc_req_size) {
                printk(KERN_ERR
                       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
                       rdev->rlc_fw->size, fw_name);
                err = -EINVAL;
        }

        /* no MC ucode on TN */
        if (!(rdev->flags & RADEON_IS_IGP)) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
                err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
                if (err)
                        goto out;
                if (rdev->mc_fw->size != mc_req_size) {
                        printk(KERN_ERR
                               "ni_mc: Bogus length %zu in firmware \"%s\"\n",
                               rdev->mc_fw->size, fw_name);
                        err = -EINVAL;
                }
        }

        if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
                err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
                if (err) {
                        printk(KERN_ERR
                               "smc: error loading firmware \"%s\"\n",
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
                        err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "ni_mc: Bogus length %zu in firmware \"%s\"\n",
                               rdev->smc_fw->size, fw_name);
                        err = -EINVAL;
                }
        }

out:
        if (err) {
                if (err != -EINVAL)
                        printk(KERN_ERR
                               "ni_cp: Failed to load firmware \"%s\"\n",
                               fw_name);
                release_firmware(rdev->pfp_fw);
                rdev->pfp_fw = NULL;
                release_firmware(rdev->me_fw);
                rdev->me_fw = NULL;
                release_firmware(rdev->rlc_fw);
                rdev->rlc_fw = NULL;
                release_firmware(rdev->mc_fw);
                rdev->mc_fw = NULL;
        }
        return err;
}

/**
 * cayman_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int cayman_get_allowed_info_register(struct radeon_device *rdev,
                                     u32 reg, u32 *val)
{
        switch (reg) {
        case GRBM_STATUS:
        case GRBM_STATUS_SE0:
        case GRBM_STATUS_SE1:
        case SRBM_STATUS:
        case SRBM_STATUS2:
        case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
        case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
        case UVD_STATUS:
                *val = RREG32(reg);
                return 0;
        default:
                return -EINVAL;
        }
}

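/**
 * tn_get_temp - get the current GPU temperature (TN)
 *
 * @rdev: radeon_device pointer
 *
 * Reads the current GNB temperature from the SMC and returns it
 * in millidegrees Celsius.
 */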
int tn_get_temp(struct radeon_device *rdev)
{
        u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
        int actual_temp = (temp / 8) - 49;

        return actual_temp * 1000;
}

/*
 * Core functions
 */
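/**
 * cayman_gpu_init - set up the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Configures the gfx engine for Cayman/Aruba: fills in the per-asic
 * shader engine, SIMD and backend limits, programs tiling and
 * GB_ADDR_CONFIG, remaps the render backends, and applies the hw
 * default register state for the 3D engine.
 */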
static void cayman_gpu_init(struct radeon_device *rdev)
{
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
        u32 cgts_tcc_disable;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
        u32 cgts_sm_ctrl_reg;
        u32 hdp_host_path_cntl;
        u32 tmp;
        u32 disabled_rb_mask;
        int i, j;

        switch (rdev->family) {
        case CHIP_CAYMAN:
                rdev->config.cayman.max_shader_engines = 2;
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 8;
                rdev->config.cayman.max_simds_per_se = 12;
                rdev->config.cayman.max_backends_per_se = 4;
                rdev->config.cayman.max_texture_channel_caches = 8;
                rdev->config.cayman.max_gprs = 256;
                rdev->config.cayman.max_threads = 256;
                rdev->config.cayman.max_gs_threads = 32;
                rdev->config.cayman.max_stack_entries = 512;
                rdev->config.cayman.sx_num_of_sets = 8;
                rdev->config.cayman.sx_max_export_size = 256;
                rdev->config.cayman.sx_max_export_pos_size = 64;
                rdev->config.cayman.sx_max_export_smx_size = 192;
                rdev->config.cayman.max_hw_contexts = 8;
                rdev->config.cayman.sq_num_cf_insts = 2;

                rdev->config.cayman.sc_prim_fifo_size = 0x100;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
                gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_ARUBA:
        default:
                rdev->config.cayman.max_shader_engines = 1;
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 2;
                if ((rdev->pdev->device == 0x9900) ||
                    (rdev->pdev->device == 0x9901) ||
                    (rdev->pdev->device == 0x9905) ||
                    (rdev->pdev->device == 0x9906) ||
                    (rdev->pdev->device == 0x9907) ||
                    (rdev->pdev->device == 0x9908) ||
                    (rdev->pdev->device == 0x9909) ||
                    (rdev->pdev->device == 0x990B) ||
                    (rdev->pdev->device == 0x990C) ||
                    (rdev->pdev->device == 0x990F) ||
                    (rdev->pdev->device == 0x9910) ||
                    (rdev->pdev->device == 0x9917) ||
                    (rdev->pdev->device == 0x9999) ||
                    (rdev->pdev->device == 0x999C)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                        rdev->config.cayman.max_hw_contexts = 8;
                        rdev->config.cayman.sx_max_export_size = 256;
                        rdev->config.cayman.sx_max_export_pos_size = 64;
                        rdev->config.cayman.sx_max_export_smx_size = 192;
                } else if ((rdev->pdev->device == 0x9903) ||
                           (rdev->pdev->device == 0x9904) ||
                           (rdev->pdev->device == 0x990A) ||
                           (rdev->pdev->device == 0x990D) ||
                           (rdev->pdev->device == 0x990E) ||
                           (rdev->pdev->device == 0x9913) ||
                           (rdev->pdev->device == 0x9918) ||
                           (rdev->pdev->device == 0x999D)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
                        rdev->config.cayman.max_hw_contexts = 8;
                        rdev->config.cayman.sx_max_export_size = 256;
                        rdev->config.cayman.sx_max_export_pos_size = 64;
                        rdev->config.cayman.sx_max_export_smx_size = 192;
                } else if ((rdev->pdev->device == 0x9919) ||
                           (rdev->pdev->device == 0x9990) ||
                           (rdev->pdev->device == 0x9991) ||
                           (rdev->pdev->device == 0x9994) ||
                           (rdev->pdev->device == 0x9995) ||
                           (rdev->pdev->device == 0x9996) ||
                           (rdev->pdev->device == 0x999A) ||
                           (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
                        rdev->config.cayman.max_hw_contexts = 4;
                        rdev->config.cayman.sx_max_export_size = 128;
                        rdev->config.cayman.sx_max_export_pos_size = 32;
                        rdev->config.cayman.sx_max_export_smx_size = 96;
                } else {
                        rdev->config.cayman.max_simds_per_se = 2;
                        rdev->config.cayman.max_backends_per_se = 1;
                        rdev->config.cayman.max_hw_contexts = 4;
                        rdev->config.cayman.sx_max_export_size = 128;
                        rdev->config.cayman.sx_max_export_pos_size = 32;
                        rdev->config.cayman.sx_max_export_smx_size = 96;
                }
                rdev->config.cayman.max_texture_channel_caches = 2;
                rdev->config.cayman.max_gprs = 256;
                rdev->config.cayman.max_threads = 256;
                rdev->config.cayman.max_gs_threads = 32;
                rdev->config.cayman.max_stack_entries = 512;
                rdev->config.cayman.sx_num_of_sets = 8;
                rdev->config.cayman.sq_num_cf_insts = 2;

                rdev->config.cayman.sc_prim_fifo_size = 0x40;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
                gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
                break;
        }

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }

        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
        WREG32(SRBM_INT_CNTL, 0x1);
        WREG32(SRBM_INT_ACK, 0x1);

        evergreen_fix_pci_max_read_req_size(rdev);

        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
        if (rdev->config.cayman.mem_row_size_in_kb > 4)
                rdev->config.cayman.mem_row_size_in_kb = 4;
        /* XXX use MC settings? */
        rdev->config.cayman.shader_engine_tile_size = 32;
        rdev->config.cayman.num_gpus = 1;
        rdev->config.cayman.multi_gpu_tile_size = 64;

        tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
        rdev->config.cayman.num_tile_pipes = (1 << tmp);
        tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
        rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
        tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
        rdev->config.cayman.num_shader_engines = tmp + 1;
        tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
        rdev->config.cayman.num_gpus = tmp + 1;
        tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
        rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
        tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
         * bits 7:4   num_banks
         * bits 11:8  group_size
         * bits 15:12 row_size
         */
        rdev->config.cayman.tile_config = 0;
        switch (rdev->config.cayman.num_tile_pipes) {
        case 1:
        default:
                rdev->config.cayman.tile_config |= (0 << 0);
                break;
        case 2:
                rdev->config.cayman.tile_config |= (1 << 0);
                break;
        case 4:
                rdev->config.cayman.tile_config |= (2 << 0);
                break;
        case 8:
                rdev->config.cayman.tile_config |= (3 << 0);
                break;
        }

        /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
                rdev->config.cayman.tile_config |= 1 << 4;
        else {
                switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
                case 0: /* four banks */
                        rdev->config.cayman.tile_config |= 0 << 4;
                        break;
                case 1: /* eight banks */
                        rdev->config.cayman.tile_config |= 1 << 4;
                        break;
                case 2: /* sixteen banks */
                default:
                        rdev->config.cayman.tile_config |= 2 << 4;
                        break;
                }
        }
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

        tmp = 0;
        for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
                u32 rb_disable_bitmap;

                WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
                WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
                rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
                tmp <<= 4;
                tmp |= rb_disable_bitmap;
        }
        /* enabled rb are just the one not disabled :) */
        disabled_rb_mask = tmp;
        tmp = 0;
        for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
                tmp |= (1 << i);
        /* if all the backends are disabled, fix it up here */
        if ((disabled_rb_mask & tmp) == tmp) {
                for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
                        disabled_rb_mask &= ~(1 << i);
        }

        for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
                u32 simd_disable_bitmap;

                WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
                WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
                simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
                simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
                tmp <<= 16;
                tmp |= simd_disable_bitmap;
        }
        rdev->config.cayman.active_simds = hweight32(~tmp);

        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
        WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        if (ASIC_IS_DCE6(rdev))
                WREG32(DMIF_ADDR_CALC, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
        WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
        WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
        WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

        if ((rdev->config.cayman.max_backends_per_se == 1) &&
            (rdev->flags & RADEON_IS_IGP)) {
                if ((disabled_rb_mask & 3) == 2) {
                        /* RB1 disabled, RB0 enabled */
                        tmp = 0x00000000;
                } else {
                        /* RB0 disabled, RB1 enabled */
                        tmp = 0x11111111;
                }
        } else {
                tmp = gb_addr_config & NUM_PIPES_MASK;
                tmp = r6xx_remap_render_backend(rdev, tmp,
                                                rdev->config.cayman.max_backends_per_se *
                                                rdev->config.cayman.max_shader_engines,
                                                CAYMAN_MAX_BACKENDS, disabled_rb_mask);
        }
        WREG32(GB_BACKEND_MAP, tmp);

        cgts_tcc_disable = 0xffff0000;
        for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
                cgts_tcc_disable &= ~(1 << (16 + i));
        WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

        /* reprogram the shader complex */
        cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
        for (i = 0; i < 16; i++)
                WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
        WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

        /* set HW defaults for 3D engine */
        WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

        sx_debug_1 = RREG32(SX_DEBUG_1);
        sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SX_DEBUG_1, sx_debug_1);

        smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
        smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
        smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);

        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

        /* need to be explicitly zero-ed */
        WREG32(VGT_OFFCHIP_LDS_BASE, 0);
        WREG32(SQ_LSTMP_RING_BASE, 0);
        WREG32(SQ_HSTMP_RING_BASE, 0);
        WREG32(SQ_ESTMP_RING_BASE, 0);
        WREG32(SQ_GSTMP_RING_BASE, 0);
        WREG32(SQ_VSTMP_RING_BASE, 0);
        WREG32(SQ_PSTMP_RING_BASE, 0);

        WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
                                        POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
                                        SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

        WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
                                 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
                                 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


        WREG32(VGT_NUM_INSTANCES, 1);

        WREG32(CP_PERFMON_CNTL, 0);

        WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
                                  FETCH_FIFO_HIWATER(0x4) |
                                  DONE_FIFO_HIWATER(0xe0) |
                                  ALU_UPDATE_FIFO_HIWATER(0x8)));

        WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
        WREG32(SQ_CONFIG, (VC_ENABLE |
                           EXPORT_SRC_C |
                           GFX_PRIO(0) |
                           CS1_PRIO(0) |
                           CS2_PRIO(1)));
        WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

        WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
                                          FORCE_EOV_MAX_REZ_CNT(255)));

        WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
               AUTO_INVLD_EN(ES_AND_GS_AUTO));

        WREG32(VGT_GS_VERTEX_REUSE, 16);
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

        WREG32(CB_PERF_CTR0_SEL_0, 0);
        WREG32(CB_PERF_CTR0_SEL_1, 0);
        WREG32(CB_PERF_CTR1_SEL_0, 0);
        WREG32(CB_PERF_CTR1_SEL_1, 0);
        WREG32(CB_PERF_CTR2_SEL_0, 0);
        WREG32(CB_PERF_CTR2_SEL_1, 0);
        WREG32(CB_PERF_CTR3_SEL_0, 0);
        WREG32(CB_PERF_CTR3_SEL_1, 0);

        tmp = RREG32(HDP_MISC_CNTL);
        tmp |= HDP_FLUSH_INVALIDATE_CACHE;
        WREG32(HDP_MISC_CNTL, tmp);

        hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
        WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

        WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

        udelay(50);

        /* set clockgating golden values on TN */
        if (rdev->family == CHIP_ARUBA) {
                tmp = RREG32_CG(CG_CGTT_LOCAL_0);
                tmp &= ~0x00380000;
                WREG32_CG(CG_CGTT_LOCAL_0, tmp);
                tmp = RREG32_CG(CG_CGTT_LOCAL_1);
                tmp &= ~0x0e000000;
                WREG32_CG(CG_CGTT_LOCAL_1, tmp);
        }
}

/*
 * GART
 */
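/**
 * cayman_pcie_gart_tlb_flush - flush the GART TLB
 *
 * @rdev: radeon_device pointer
 *
 * Flushes the HDP cache and requests a VM TLB invalidate via
 * VM_INVALIDATE_REQUEST.
 */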
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        /* flush hdp cache */
        WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

        /* bits 0-7 are the VM contexts0-7 */
        WREG32(VM_INVALIDATE_REQUEST, 1);
}

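/**
 * cayman_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pins the GART page table in VRAM, programs the L1 TLB and L2 cache
 * controls, sets up VM context 0 for the GTT aperture and contexts 1-7
 * for per-process VMs, then flushes the TLB.
 * Returns 0 on success or a negative error code on failure.
 */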
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
        int i, r;

        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        /* Setup TLB control */
        WREG32(MC_VM_MX_L1_TLB_CNTL,
               (0xA << 7) |
               ENABLE_L1_TLB |
               ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
               ENABLE_ADVANCED_DRIVER_MODEL |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
               ENABLE_L2_FRAGMENT_PROCESSING |
               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7) |
               CONTEXT1_IDENTITY_ACCESS_MODE(1));
        WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
        WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
               BANK_SELECT(6) |
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT0_CNTL2, 0);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

        WREG32(0x15D4, 0);
        WREG32(0x15D8, 0);
        WREG32(0x15DC, 0);

        /* empty context1-7 */
        /* Assign the pt base to something valid for now; the pts used for
         * the VMs are determined by the application and setup and assigned
         * on the fly in the vm part of radeon_gart.c
         */
        for (i = 1; i < 8; i++) {
                WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
                WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
                        rdev->vm_manager.max_pfn - 1);
                WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
                       rdev->vm_manager.saved_table_addr[i]);
        }

        /* enable context1-7 */
        WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT1_CNTL2, 4);
        WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
                                PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
                                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
                                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
                                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
                                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
                                READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                READ_PROTECTION_FAULT_ENABLE_DEFAULT |
                                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

        cayman_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
                 (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
}

1362 static void cayman_pcie_gart_disable(struct radeon_device *rdev)
1363 {
1364         unsigned i;
1365
1366         for (i = 1; i < 8; ++i) {
1367                 rdev->vm_manager.saved_table_addr[i] = RREG32(
1368                         VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
1369         }
1370
1371         /* Disable all tables */
1372         WREG32(VM_CONTEXT0_CNTL, 0);
1373         WREG32(VM_CONTEXT1_CNTL, 0);
1374         /* Setup TLB control */
1375         WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
1376                SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1377                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
1378         /* Setup L2 cache */
1379         WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1380                ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
1381                EFFECTIVE_L2_QUEUE_SIZE(7) |
1382                CONTEXT1_IDENTITY_ACCESS_MODE(1));
1383         WREG32(VM_L2_CNTL2, 0);
1384         WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
1385                L2_CACHE_BIGK_FRAGMENT_SIZE(6));
1386         radeon_gart_table_vram_unpin(rdev);
1387 }
1388
1389 static void cayman_pcie_gart_fini(struct radeon_device *rdev)
1390 {
1391         cayman_pcie_gart_disable(rdev);
1392         radeon_gart_table_vram_free(rdev);
1393         radeon_gart_fini(rdev);
1394 }
1395
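/*
 * The low two bits of SRBM_GFX_CNTL select which CP ring's register bank
 * is visible, so CP_INT_CNTL has to be written once per ring; a caller
 * (the evergreen IRQ setup code, for instance) would do something like:
 *
 *	cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
 *	cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
 *	cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
 */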
1396 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1397                               int ring, u32 cp_int_cntl)
1398 {
1399         u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1400
1401         WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1402         WREG32(CP_INT_CNTL, cp_int_cntl);
1403 }
1404
1405 /*
1406  * CP.
1407  */
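/*
 * Fences are emitted as a SURFACE_SYNC cache flush followed by an
 * EVENT_WRITE_EOP: the CP writes fence->seq to the ring's fence address
 * and raises an interrupt once the write has landed (DATA_SEL(1) selects
 * a 32-bit data write, INT_SEL(2) an interrupt after the data write is
 * confirmed).
 */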
1408 void cayman_fence_ring_emit(struct radeon_device *rdev,
1409                             struct radeon_fence *fence)
1410 {
1411         struct radeon_ring *ring = &rdev->ring[fence->ring];
1412         u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
1413         u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1414                 PACKET3_SH_ACTION_ENA;
1415
1416         /* flush read cache over gart */
1417         radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1418         radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
1419         radeon_ring_write(ring, 0xFFFFFFFF);
1420         radeon_ring_write(ring, 0);
1421         radeon_ring_write(ring, 10); /* poll interval */
1422         /* EVENT_WRITE_EOP - flush caches, send int */
1423         radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1424         radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
1425         radeon_ring_write(ring, lower_32_bits(addr));
1426         radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
1427         radeon_ring_write(ring, fence->seq);
1428         radeon_ring_write(ring, 0);
1429 }
1430
1431 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1432 {
1433         struct radeon_ring *ring = &rdev->ring[ib->ring];
1434         unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
1435         u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1436                 PACKET3_SH_ACTION_ENA;
1437
1438         /* set to DX10/11 mode */
1439         radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1440         radeon_ring_write(ring, 1);
1441
1442         if (ring->rptr_save_reg) {
1443                 uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
1444                 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1445                 radeon_ring_write(ring, ((ring->rptr_save_reg -
1446                                           PACKET3_SET_CONFIG_REG_START) >> 2));
1447                 radeon_ring_write(ring, next_rptr);
1448         }
1449
1450         radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1451         radeon_ring_write(ring,
1452 #ifdef __BIG_ENDIAN
1453                           (2 << 0) |
1454 #endif
1455                           (ib->gpu_addr & 0xFFFFFFFC));
1456         radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1457         radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
1458
1459         /* flush read cache over gart for this vmid */
1460         radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1461         radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
1462         radeon_ring_write(ring, 0xFFFFFFFF);
1463         radeon_ring_write(ring, 0);
1464         radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
1465 }
1466
1467 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1468 {
1469         if (enable)
1470                 WREG32(CP_ME_CNTL, 0);
1471         else {
1472                 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1473                         radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1474                 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
1475                 WREG32(SCRATCH_UMSK, 0);
1476                 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1477         }
1478 }
1479
1480 u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
1481                         struct radeon_ring *ring)
1482 {
1483         u32 rptr;
1484
1485         if (rdev->wb.enabled)
1486                 rptr = rdev->wb.wb[ring->rptr_offs/4];
1487         else {
1488                 if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
1489                         rptr = RREG32(CP_RB0_RPTR);
1490                 else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
1491                         rptr = RREG32(CP_RB1_RPTR);
1492                 else
1493                         rptr = RREG32(CP_RB2_RPTR);
1494         }
1495
1496         return rptr;
1497 }
1498
1499 u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
1500                         struct radeon_ring *ring)
1501 {
1502         u32 wptr;
1503
1504         if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
1505                 wptr = RREG32(CP_RB0_WPTR);
1506         else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
1507                 wptr = RREG32(CP_RB1_WPTR);
1508         else
1509                 wptr = RREG32(CP_RB2_WPTR);
1510
1511         return wptr;
1512 }
1513
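/*
 * The (void)RREG32() read-back after each WPTR write below flushes the
 * posted MMIO write so the CP sees the new write pointer right away.
 */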
1514 void cayman_gfx_set_wptr(struct radeon_device *rdev,
1515                          struct radeon_ring *ring)
1516 {
1517         if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
1518                 WREG32(CP_RB0_WPTR, ring->wptr);
1519                 (void)RREG32(CP_RB0_WPTR);
1520         } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
1521                 WREG32(CP_RB1_WPTR, ring->wptr);
1522                 (void)RREG32(CP_RB1_WPTR);
1523         } else {
1524                 WREG32(CP_RB2_WPTR, ring->wptr);
1525                 (void)RREG32(CP_RB2_WPTR);
1526         }
1527 }
1528
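/*
 * CP_PFP_UCODE_DATA and CP_ME_RAM_DATA are auto-incrementing data ports:
 * the address register is zeroed, each data write advances it by one
 * dword, and it is zeroed again afterwards so the CP fetches the freshly
 * loaded microcode from the top.
 */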
1529 static int cayman_cp_load_microcode(struct radeon_device *rdev)
1530 {
1531         const __be32 *fw_data;
1532         int i;
1533
1534         if (!rdev->me_fw || !rdev->pfp_fw)
1535                 return -EINVAL;
1536
1537         cayman_cp_enable(rdev, false);
1538
1539         fw_data = (const __be32 *)rdev->pfp_fw->data;
1540         WREG32(CP_PFP_UCODE_ADDR, 0);
1541         for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
1542                 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1543         WREG32(CP_PFP_UCODE_ADDR, 0);
1544
1545         fw_data = (const __be32 *)rdev->me_fw->data;
1546         WREG32(CP_ME_RAM_WADDR, 0);
1547         for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
1548                 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1549
1550         WREG32(CP_PFP_UCODE_ADDR, 0);
1551         WREG32(CP_ME_RAM_WADDR, 0);
1552         WREG32(CP_ME_RAM_RADDR, 0);
1553         return 0;
1554 }
1555
1556 static int cayman_cp_start(struct radeon_device *rdev)
1557 {
1558         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1559         int r, i;
1560
1561         r = radeon_ring_lock(rdev, ring, 7);
1562         if (r) {
1563                 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1564                 return r;
1565         }
1566         radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1567         radeon_ring_write(ring, 0x1);
1568         radeon_ring_write(ring, 0x0);
1569         radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
1570         radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1571         radeon_ring_write(ring, 0);
1572         radeon_ring_write(ring, 0);
1573         radeon_ring_unlock_commit(rdev, ring, false);
1574
1575         cayman_cp_enable(rdev, true);
1576
1577         r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
1578         if (r) {
1579                 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1580                 return r;
1581         }
1582
1583         /* setup clear context state */
1584         radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1585         radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1586
1587         for (i = 0; i < cayman_default_size; i++)
1588                 radeon_ring_write(ring, cayman_default_state[i]);
1589
1590         radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1591         radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1592
1593         /* set clear context state */
1594         radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1595         radeon_ring_write(ring, 0);
1596
1597         /* SQ_VTX_BASE_VTX_LOC */
1598         radeon_ring_write(ring, 0xc0026f00);
1599         radeon_ring_write(ring, 0x00000000);
1600         radeon_ring_write(ring, 0x00000000);
1601         radeon_ring_write(ring, 0x00000000);
1602
1603         /* Clear consts */
1604         radeon_ring_write(ring, 0xc0036f00);
1605         radeon_ring_write(ring, 0x00000bc4);
1606         radeon_ring_write(ring, 0xffffffff);
1607         radeon_ring_write(ring, 0xffffffff);
1608         radeon_ring_write(ring, 0xffffffff);
1609
1610         radeon_ring_write(ring, 0xc0026900);
1611         radeon_ring_write(ring, 0x00000316);
1612         radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1613         radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
1614
1615         radeon_ring_unlock_commit(rdev, ring, false);
1616
1617         /* XXX init other rings */
1618
1619         return 0;
1620 }
1621
1622 static void cayman_cp_fini(struct radeon_device *rdev)
1623 {
1624         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1625         cayman_cp_enable(rdev, false);
1626         radeon_ring_fini(rdev, ring);
1627         radeon_scratch_free(rdev, ring->rptr_save_reg);
1628 }
1629
1630 static int cayman_cp_resume(struct radeon_device *rdev)
1631 {
1632         static const int ridx[] = {
1633                 RADEON_RING_TYPE_GFX_INDEX,
1634                 CAYMAN_RING_TYPE_CP1_INDEX,
1635                 CAYMAN_RING_TYPE_CP2_INDEX
1636         };
1637         static const unsigned cp_rb_cntl[] = {
1638                 CP_RB0_CNTL,
1639                 CP_RB1_CNTL,
1640                 CP_RB2_CNTL,
1641         };
1642         static const unsigned cp_rb_rptr_addr[] = {
1643                 CP_RB0_RPTR_ADDR,
1644                 CP_RB1_RPTR_ADDR,
1645                 CP_RB2_RPTR_ADDR
1646         };
1647         static const unsigned cp_rb_rptr_addr_hi[] = {
1648                 CP_RB0_RPTR_ADDR_HI,
1649                 CP_RB1_RPTR_ADDR_HI,
1650                 CP_RB2_RPTR_ADDR_HI
1651         };
1652         static const unsigned cp_rb_base[] = {
1653                 CP_RB0_BASE,
1654                 CP_RB1_BASE,
1655                 CP_RB2_BASE
1656         };
1657         static const unsigned cp_rb_rptr[] = {
1658                 CP_RB0_RPTR,
1659                 CP_RB1_RPTR,
1660                 CP_RB2_RPTR
1661         };
1662         static const unsigned cp_rb_wptr[] = {
1663                 CP_RB0_WPTR,
1664                 CP_RB1_WPTR,
1665                 CP_RB2_WPTR
1666         };
1667         struct radeon_ring *ring;
1668         int i, r;
1669
1670         /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1671         WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1672                                  SOFT_RESET_PA |
1673                                  SOFT_RESET_SH |
1674                                  SOFT_RESET_VGT |
1675                                  SOFT_RESET_SPI |
1676                                  SOFT_RESET_SX));
1677         RREG32(GRBM_SOFT_RESET);
1678         mdelay(15);
1679         WREG32(GRBM_SOFT_RESET, 0);
1680         RREG32(GRBM_SOFT_RESET);
1681
1682         WREG32(CP_SEM_WAIT_TIMER, 0x0);
1683         WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
1684
1685         /* Set the write pointer delay */
1686         WREG32(CP_RB_WPTR_DELAY, 0);
1687
1688         WREG32(CP_DEBUG, (1 << 27));
1689
1690         /* set the wb address whether it's enabled or not */
1691         WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1692         WREG32(SCRATCH_UMSK, 0xff);
1693
1694         for (i = 0; i < 3; ++i) {
1695                 uint32_t rb_cntl;
1696                 uint64_t addr;
1697
1698                 /* Set ring buffer size */
1699                 ring = &rdev->ring[ridx[i]];
1700                 rb_cntl = order_base_2(ring->ring_size / 8);
1701                 rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
1702 #ifdef __BIG_ENDIAN
1703                 rb_cntl |= BUF_SWAP_32BIT;
1704 #endif
1705                 WREG32(cp_rb_cntl[i], rb_cntl);
1706
1707                 /* set the wb address whether it's enabled or not */
1708                 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
1709                 WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
1710                 WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
1711         }
1712
1713         /* set the rb base addr; this causes an internal reset of ALL rings */
1714         for (i = 0; i < 3; ++i) {
1715                 ring = &rdev->ring[ridx[i]];
1716                 WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
1717         }
1718
1719         for (i = 0; i < 3; ++i) {
1720                 /* Initialize the ring buffer's read and write pointers */
1721                 ring = &rdev->ring[ridx[i]];
1722                 WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
1723
1724                 ring->wptr = 0;
1725                 WREG32(cp_rb_rptr[i], 0);
1726                 WREG32(cp_rb_wptr[i], ring->wptr);
1727
1728                 mdelay(1);
1729                 WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
1730         }
1731
1732         /* start the rings */
1733         cayman_cp_start(rdev);
1734         rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1735         rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1736         rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1737         /* this only tests cp0 */
1738         r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1739         if (r) {
1740                 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1741                 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1742                 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1743                 return r;
1744         }
1745
1746         if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1747                 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1748
1749         return 0;
1750 }
1751
1752 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1753 {
1754         u32 reset_mask = 0;
1755         u32 tmp;
1756
1757         /* GRBM_STATUS */
1758         tmp = RREG32(GRBM_STATUS);
1759         if (tmp & (PA_BUSY | SC_BUSY |
1760                    SH_BUSY | SX_BUSY |
1761                    TA_BUSY | VGT_BUSY |
1762                    DB_BUSY | CB_BUSY |
1763                    GDS_BUSY | SPI_BUSY |
1764                    IA_BUSY | IA_BUSY_NO_DMA))
1765                 reset_mask |= RADEON_RESET_GFX;
1766
1767         if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
1768                    CP_BUSY | CP_COHERENCY_BUSY))
1769                 reset_mask |= RADEON_RESET_CP;
1770
1771         if (tmp & GRBM_EE_BUSY)
1772                 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1773
1774         /* DMA_STATUS_REG 0 */
1775         tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1776         if (!(tmp & DMA_IDLE))
1777                 reset_mask |= RADEON_RESET_DMA;
1778
1779         /* DMA_STATUS_REG 1 */
1780         tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1781         if (!(tmp & DMA_IDLE))
1782                 reset_mask |= RADEON_RESET_DMA1;
1783
1784         /* SRBM_STATUS2 */
1785         tmp = RREG32(SRBM_STATUS2);
1786         if (tmp & DMA_BUSY)
1787                 reset_mask |= RADEON_RESET_DMA;
1788
1789         if (tmp & DMA1_BUSY)
1790                 reset_mask |= RADEON_RESET_DMA1;
1791
1792         /* SRBM_STATUS */
1793         tmp = RREG32(SRBM_STATUS);
1794         if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
1795                 reset_mask |= RADEON_RESET_RLC;
1796
1797         if (tmp & IH_BUSY)
1798                 reset_mask |= RADEON_RESET_IH;
1799
1800         if (tmp & SEM_BUSY)
1801                 reset_mask |= RADEON_RESET_SEM;
1802
1803         if (tmp & GRBM_RQ_PENDING)
1804                 reset_mask |= RADEON_RESET_GRBM;
1805
1806         if (tmp & VMC_BUSY)
1807                 reset_mask |= RADEON_RESET_VMC;
1808
1809         if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
1810                    MCC_BUSY | MCD_BUSY))
1811                 reset_mask |= RADEON_RESET_MC;
1812
1813         if (evergreen_is_display_hung(rdev))
1814                 reset_mask |= RADEON_RESET_DISPLAY;
1815
1816         /* VM_L2_STATUS */
1817         tmp = RREG32(VM_L2_STATUS);
1818         if (tmp & L2_BUSY)
1819                 reset_mask |= RADEON_RESET_VMC;
1820
1821         /* Skip MC reset as it's most likely not hung, just busy */
1822         if (reset_mask & RADEON_RESET_MC) {
1823                 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1824                 reset_mask &= ~RADEON_RESET_MC;
1825         }
1826
1827         return reset_mask;
1828 }
1829
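/*
 * The reset mask computed above drives both the soft reset below and the
 * per-ring lockup checks; cayman_gfx_is_lockup() further down, for
 * instance, only looks at the GFX/COMPUTE/CP bits of this mask.
 */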
1830 static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1831 {
1832         struct evergreen_mc_save save;
1833         u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1834         u32 tmp;
1835
1836         if (reset_mask == 0)
1837                 return;
1838
1839         dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1840
1841         evergreen_print_gpu_status_regs(rdev);
1842         dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
1843                  RREG32(0x14F8));
1844         dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1845                  RREG32(0x14D8));
1846         dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1847                  RREG32(0x14FC));
1848         dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1849                  RREG32(0x14DC));
1850
1851         /* Disable CP parsing/prefetching */
1852         WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1853
1854         if (reset_mask & RADEON_RESET_DMA) {
1855                 /* dma0 */
1856                 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1857                 tmp &= ~DMA_RB_ENABLE;
1858                 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1859         }
1860
1861         if (reset_mask & RADEON_RESET_DMA1) {
1862                 /* dma1 */
1863                 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1864                 tmp &= ~DMA_RB_ENABLE;
1865                 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1866         }
1867
1868         udelay(50);
1869
1870         evergreen_mc_stop(rdev, &save);
1871         if (evergreen_mc_wait_for_idle(rdev)) {
1872                 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1873         }
1874
1875         if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1876                 grbm_soft_reset = SOFT_RESET_CB |
1877                         SOFT_RESET_DB |
1878                         SOFT_RESET_GDS |
1879                         SOFT_RESET_PA |
1880                         SOFT_RESET_SC |
1881                         SOFT_RESET_SPI |
1882                         SOFT_RESET_SH |
1883                         SOFT_RESET_SX |
1884                         SOFT_RESET_TC |
1885                         SOFT_RESET_TA |
1886                         SOFT_RESET_VGT |
1887                         SOFT_RESET_IA;
1888         }
1889
1890         if (reset_mask & RADEON_RESET_CP) {
1891                 grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
1892
1893                 srbm_soft_reset |= SOFT_RESET_GRBM;
1894         }
1895
1896         if (reset_mask & RADEON_RESET_DMA)
1897                 srbm_soft_reset |= SOFT_RESET_DMA;
1898
1899         if (reset_mask & RADEON_RESET_DMA1)
1900                 srbm_soft_reset |= SOFT_RESET_DMA1;
1901
1902         if (reset_mask & RADEON_RESET_DISPLAY)
1903                 srbm_soft_reset |= SOFT_RESET_DC;
1904
1905         if (reset_mask & RADEON_RESET_RLC)
1906                 srbm_soft_reset |= SOFT_RESET_RLC;
1907
1908         if (reset_mask & RADEON_RESET_SEM)
1909                 srbm_soft_reset |= SOFT_RESET_SEM;
1910
1911         if (reset_mask & RADEON_RESET_IH)
1912                 srbm_soft_reset |= SOFT_RESET_IH;
1913
1914         if (reset_mask & RADEON_RESET_GRBM)
1915                 srbm_soft_reset |= SOFT_RESET_GRBM;
1916
1917         if (reset_mask & RADEON_RESET_VMC)
1918                 srbm_soft_reset |= SOFT_RESET_VMC;
1919
1920         if (!(rdev->flags & RADEON_IS_IGP)) {
1921                 if (reset_mask & RADEON_RESET_MC)
1922                         srbm_soft_reset |= SOFT_RESET_MC;
1923         }
1924
1925         if (grbm_soft_reset) {
1926                 tmp = RREG32(GRBM_SOFT_RESET);
1927                 tmp |= grbm_soft_reset;
1928                 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
1929                 WREG32(GRBM_SOFT_RESET, tmp);
1930                 tmp = RREG32(GRBM_SOFT_RESET);
1931
1932                 udelay(50);
1933
1934                 tmp &= ~grbm_soft_reset;
1935                 WREG32(GRBM_SOFT_RESET, tmp);
1936                 tmp = RREG32(GRBM_SOFT_RESET);
1937         }
1938
1939         if (srbm_soft_reset) {
1940                 tmp = RREG32(SRBM_SOFT_RESET);
1941                 tmp |= srbm_soft_reset;
1942                 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1943                 WREG32(SRBM_SOFT_RESET, tmp);
1944                 tmp = RREG32(SRBM_SOFT_RESET);
1945
1946                 udelay(50);
1947
1948                 tmp &= ~srbm_soft_reset;
1949                 WREG32(SRBM_SOFT_RESET, tmp);
1950                 tmp = RREG32(SRBM_SOFT_RESET);
1951         }
1952
1953         /* Wait a little for things to settle down */
1954         udelay(50);
1955
1956         evergreen_mc_resume(rdev, &save);
1957         udelay(50);
1958
1959         evergreen_print_gpu_status_regs(rdev);
1960 }
1961
1962 int cayman_asic_reset(struct radeon_device *rdev)
1963 {
1964         u32 reset_mask;
1965
1966         reset_mask = cayman_gpu_check_soft_reset(rdev);
1967
1968         if (reset_mask)
1969                 r600_set_bios_scratch_engine_hung(rdev, true);
1970
1971         cayman_gpu_soft_reset(rdev, reset_mask);
1972
1973         reset_mask = cayman_gpu_check_soft_reset(rdev);
1974
1975         if (reset_mask)
1976                 evergreen_gpu_pci_config_reset(rdev);
1977
1978         r600_set_bios_scratch_engine_hung(rdev, false);
1979
1980         return 0;
1981 }
1982
1983 /**
1984  * cayman_gfx_is_lockup - Check if the GFX engine is locked up
1985  *
1986  * @rdev: radeon_device pointer
1987  * @ring: radeon_ring structure holding ring information
1988  *
1989  * Check if the GFX engine is locked up.
1990  * Returns true if the engine appears to be locked up, false if not.
1991  */
1992 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1993 {
1994         u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1995
1996         if (!(reset_mask & (RADEON_RESET_GFX |
1997                             RADEON_RESET_COMPUTE |
1998                             RADEON_RESET_CP))) {
1999                 radeon_ring_lockup_update(rdev, ring);
2000                 return false;
2001         }
2002         return radeon_ring_test_lockup(rdev, ring);
2003 }
2004
2005 static int cayman_startup(struct radeon_device *rdev)
2006 {
2007         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2008         int r;
2009
2010         /* enable pcie gen2 link */
2011         evergreen_pcie_gen2_enable(rdev);
2012         /* enable aspm */
2013         evergreen_program_aspm(rdev);
2014
2015         /* scratch needs to be initialized before MC */
2016         r = r600_vram_scratch_init(rdev);
2017         if (r)
2018                 return r;
2019
2020         evergreen_mc_program(rdev);
2021
2022         if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
2023                 r = ni_mc_load_microcode(rdev);
2024                 if (r) {
2025                         DRM_ERROR("Failed to load MC firmware!\n");
2026                         return r;
2027                 }
2028         }
2029
2030         r = cayman_pcie_gart_enable(rdev);
2031         if (r)
2032                 return r;
2033         cayman_gpu_init(rdev);
2034
2035         /* allocate rlc buffers */
2036         if (rdev->flags & RADEON_IS_IGP) {
2037                 rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
2038                 rdev->rlc.reg_list_size =
2039                         (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
2040                 rdev->rlc.cs_data = cayman_cs_data;
2041                 r = sumo_rlc_init(rdev);
2042                 if (r) {
2043                         DRM_ERROR("Failed to init rlc BOs!\n");
2044                         return r;
2045                 }
2046         }
2047
2048         /* allocate wb buffer */
2049         r = radeon_wb_init(rdev);
2050         if (r)
2051                 return r;
2052
2053         r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2054         if (r) {
2055                 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2056                 return r;
2057         }
2058
2059         r = uvd_v2_2_resume(rdev);
2060         if (!r) {
2061                 r = radeon_fence_driver_start_ring(rdev,
2062                                                    R600_RING_TYPE_UVD_INDEX);
2063                 if (r)
2064                         dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
2065         }
2066         if (r)
2067                 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
2068
2069         if (rdev->family == CHIP_ARUBA) {
2070                 r = radeon_vce_resume(rdev);
2071                 if (!r)
2072                         r = vce_v1_0_resume(rdev);
2073
2074                 if (!r)
2075                         r = radeon_fence_driver_start_ring(rdev,
2076                                                            TN_RING_TYPE_VCE1_INDEX);
2077                 if (!r)
2078                         r = radeon_fence_driver_start_ring(rdev,
2079                                                            TN_RING_TYPE_VCE2_INDEX);
2080
2081                 if (r) {
2082                         dev_err(rdev->dev, "VCE init error (%d).\n", r);
2083                         rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
2084                         rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
2085                 }
2086         }
2087
2088         r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
2089         if (r) {
2090                 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2091                 return r;
2092         }
2093
2094         r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
2095         if (r) {
2096                 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2097                 return r;
2098         }
2099
2100         r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2101         if (r) {
2102                 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2103                 return r;
2104         }
2105
2106         r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
2107         if (r) {
2108                 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2109                 return r;
2110         }
2111
2112         /* Enable IRQ */
2113         if (!rdev->irq.installed) {
2114                 r = radeon_irq_kms_init(rdev);
2115                 if (r)
2116                         return r;
2117         }
2118
2119         r = r600_irq_init(rdev);
2120         if (r) {
2121                 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2122                 radeon_irq_kms_fini(rdev);
2123                 return r;
2124         }
2125         evergreen_irq_set(rdev);
2126
2127         r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2128                              RADEON_CP_PACKET2);
2129         if (r)
2130                 return r;
2131
2132         ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2133         r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2134                              DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2135         if (r)
2136                 return r;
2137
2138         ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
2139         r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
2140                              DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2141         if (r)
2142                 return r;
2143
2144         r = cayman_cp_load_microcode(rdev);
2145         if (r)
2146                 return r;
2147         r = cayman_cp_resume(rdev);
2148         if (r)
2149                 return r;
2150
2151         r = cayman_dma_resume(rdev);
2152         if (r)
2153                 return r;
2154
2155         ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2156         if (ring->ring_size) {
2157                 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
2158                                      RADEON_CP_PACKET2);
2159                 if (!r)
2160                         r = uvd_v1_0_init(rdev);
2161                 if (r)
2162                         DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
2163         }
2164
2165         if (rdev->family == CHIP_ARUBA) {
2166                 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
2167                 if (ring->ring_size)
2168                         r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2169
2170                 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
2171                 if (ring->ring_size)
2172                         r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2173
2174                 if (!r)
2175                         r = vce_v1_0_init(rdev);
2176                 if (r)
2177                         DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
2178         }
2179
2180         r = radeon_ib_pool_init(rdev);
2181         if (r) {
2182                 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2183                 return r;
2184         }
2185
2186         r = radeon_vm_manager_init(rdev);
2187         if (r) {
2188                 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
2189                 return r;
2190         }
2191
2192         r = radeon_audio_init(rdev);
2193         if (r)
2194                 return r;
2195
2196         return 0;
2197 }
2198
2199 int cayman_resume(struct radeon_device *rdev)
2200 {
2201         int r;
2202
2203         /* Do not reset the GPU before posting; on rv770 hw, unlike on r500
2204          * hw, posting will perform the necessary tasks to bring the GPU back
2205          * into good shape.
2206          */
2207         /* post card */
2208         atom_asic_init(rdev->mode_info.atom_context);
2209
2210         /* init golden registers */
2211         ni_init_golden_registers(rdev);
2212
2213         if (rdev->pm.pm_method == PM_METHOD_DPM)
2214                 radeon_pm_resume(rdev);
2215
2216         rdev->accel_working = true;
2217         r = cayman_startup(rdev);
2218         if (r) {
2219                 DRM_ERROR("cayman startup failed on resume\n");
2220                 rdev->accel_working = false;
2221                 return r;
2222         }
2223         return r;
2224 }
2225
2226 int cayman_suspend(struct radeon_device *rdev)
2227 {
2228         radeon_pm_suspend(rdev);
2229         radeon_audio_fini(rdev);
2230         radeon_vm_manager_fini(rdev);
2231         cayman_cp_enable(rdev, false);
2232         cayman_dma_stop(rdev);
2233         uvd_v1_0_fini(rdev);
2234         radeon_uvd_suspend(rdev);
2235         evergreen_irq_suspend(rdev);
2236         radeon_wb_disable(rdev);
2237         cayman_pcie_gart_disable(rdev);
2238         return 0;
2239 }
2240
2241 /* The plan is to move initialization into this function and use
2242  * helper functions so that radeon_device_init does little more than
2243  * call ASIC-specific functions.
2244  * This should also allow us to remove a bunch of callback functions
2245  * like vram_info.
2246  */
2247 int cayman_init(struct radeon_device *rdev)
2248 {
2249         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2250         int r;
2251
2252         /* Read BIOS */
2253         if (!radeon_get_bios(rdev)) {
2254                 if (ASIC_IS_AVIVO(rdev))
2255                         return -EINVAL;
2256         }
2257         /* Must be an ATOMBIOS */
2258         if (!rdev->is_atom_bios) {
2259                 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
2260                 return -EINVAL;
2261         }
2262         r = radeon_atombios_init(rdev);
2263         if (r)
2264                 return r;
2265
2266         /* Post card if necessary */
2267         if (!radeon_card_posted(rdev)) {
2268                 if (!rdev->bios) {
2269                         dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2270                         return -EINVAL;
2271                 }
2272                 DRM_INFO("GPU not posted. posting now...\n");
2273                 atom_asic_init(rdev->mode_info.atom_context);
2274         }
2275         /* init golden registers */
2276         ni_init_golden_registers(rdev);
2277         /* Initialize scratch registers */
2278         r600_scratch_init(rdev);
2279         /* Initialize surface registers */
2280         radeon_surface_init(rdev);
2281         /* Initialize clocks */
2282         radeon_get_clock_info(rdev->ddev);
2283         /* Fence driver */
2284         r = radeon_fence_driver_init(rdev);
2285         if (r)
2286                 return r;
2287         /* initialize memory controller */
2288         r = evergreen_mc_init(rdev);
2289         if (r)
2290                 return r;
2291         /* Memory manager */
2292         r = radeon_bo_init(rdev);
2293         if (r)
2294                 return r;
2295
2296         if (rdev->flags & RADEON_IS_IGP) {
2297                 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2298                         r = ni_init_microcode(rdev);
2299                         if (r) {
2300                                 DRM_ERROR("Failed to load firmware!\n");
2301                                 return r;
2302                         }
2303                 }
2304         } else {
2305                 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
2306                         r = ni_init_microcode(rdev);
2307                         if (r) {
2308                                 DRM_ERROR("Failed to load firmware!\n");
2309                                 return r;
2310                         }
2311                 }
2312         }
2313
2314         /* Initialize power management */
2315         radeon_pm_init(rdev);
2316
2317         ring->ring_obj = NULL;
2318         r600_ring_init(rdev, ring, 1024 * 1024);
2319
2320         ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2321         ring->ring_obj = NULL;
2322         r600_ring_init(rdev, ring, 64 * 1024);
2323
2324         ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
2325         ring->ring_obj = NULL;
2326         r600_ring_init(rdev, ring, 64 * 1024);
2327
2328         r = radeon_uvd_init(rdev);
2329         if (!r) {
2330                 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2331                 ring->ring_obj = NULL;
2332                 r600_ring_init(rdev, ring, 4096);
2333         }
2334
2335         if (rdev->family == CHIP_ARUBA) {
2336                 r = radeon_vce_init(rdev);
2337                 if (!r) {
2338                         ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
2339                         ring->ring_obj = NULL;
2340                         r600_ring_init(rdev, ring, 4096);
2341
2342                         ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
2343                         ring->ring_obj = NULL;
2344                         r600_ring_init(rdev, ring, 4096);
2345                 }
2346         }
2347
2348         rdev->ih.ring_obj = NULL;
2349         r600_ih_ring_init(rdev, 64 * 1024);
2350
2351         r = r600_pcie_gart_init(rdev);
2352         if (r)
2353                 return r;
2354
2355         rdev->accel_working = true;
2356         r = cayman_startup(rdev);
2357         if (r) {
2358                 dev_err(rdev->dev, "disabling GPU acceleration\n");
2359                 cayman_cp_fini(rdev);
2360                 cayman_dma_fini(rdev);
2361                 r600_irq_fini(rdev);
2362                 if (rdev->flags & RADEON_IS_IGP)
2363                         sumo_rlc_fini(rdev);
2364                 radeon_wb_fini(rdev);
2365                 radeon_ib_pool_fini(rdev);
2366                 radeon_vm_manager_fini(rdev);
2367                 radeon_irq_kms_fini(rdev);
2368                 cayman_pcie_gart_fini(rdev);
2369                 rdev->accel_working = false;
2370         }
2371
2372         /* Don't start up if the MC ucode is missing.
2373          * The default clocks and voltages before the MC ucode
2374          * is loaded are not sufficient for advanced operations.
2375          *
2376          * We can skip this check for TN, because there is no MC
2377          * ucode.
2378          */
2379         if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
2380                 DRM_ERROR("radeon: MC ucode required for NI+.\n");
2381                 return -EINVAL;
2382         }
2383
2384         return 0;
2385 }
2386
2387 void cayman_fini(struct radeon_device *rdev)
2388 {
2389         radeon_pm_fini(rdev);
2390         cayman_cp_fini(rdev);
2391         cayman_dma_fini(rdev);
2392         r600_irq_fini(rdev);
2393         if (rdev->flags & RADEON_IS_IGP)
2394                 sumo_rlc_fini(rdev);
2395         radeon_wb_fini(rdev);
2396         radeon_vm_manager_fini(rdev);
2397         radeon_ib_pool_fini(rdev);
2398         radeon_irq_kms_fini(rdev);
2399         uvd_v1_0_fini(rdev);
2400         radeon_uvd_fini(rdev);
2401         if (rdev->family == CHIP_ARUBA)
2402                 radeon_vce_fini(rdev);
2403         cayman_pcie_gart_fini(rdev);
2404         r600_vram_scratch_fini(rdev);
2405         radeon_gem_fini(rdev);
2406         radeon_fence_driver_fini(rdev);
2407         radeon_bo_fini(rdev);
2408         radeon_atombios_fini(rdev);
2409         kfree(rdev->bios);
2410         rdev->bios = NULL;
2411 }
2412
2413 /*
2414  * vm
2415  */
2416 int cayman_vm_init(struct radeon_device *rdev)
2417 {
2418         /* number of VMs */
2419         rdev->vm_manager.nvm = 8;
2420         /* base offset of vram pages */
2421         if (rdev->flags & RADEON_IS_IGP) {
2422                 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
2423                 tmp <<= 22;
2424                 rdev->vm_manager.vram_base_offset = tmp;
2425         } else
2426                 rdev->vm_manager.vram_base_offset = 0;
2427         return 0;
2428 }
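
/*
 * nvm = 8 above matches the eight hardware page-table contexts programmed
 * in cayman_pcie_gart_enable(): context 0 backs the GART, contexts 1-7
 * are handed out to userspace VMs. On IGPs, FUS_MC_VM_FB_OFFSET appears
 * to hold the framebuffer offset in 4 MB units, hence the shift by 22.
 */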
2429
2430 void cayman_vm_fini(struct radeon_device *rdev)
2431 {
2432 }
2433
2434 /**
2435  * cayman_vm_decode_fault - print human readable fault info
2436  *
2437  * @rdev: radeon_device pointer
2438  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2439  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2440  *
2441  * Print human readable fault information (cayman/TN).
2442  */
2443 void cayman_vm_decode_fault(struct radeon_device *rdev,
2444                             u32 status, u32 addr)
2445 {
2446         u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2447         u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2448         u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2449         char *block;
2450
2451         switch (mc_id) {
2452         case 32:
2453         case 16:
2454         case 96:
2455         case 80:
2456         case 160:
2457         case 144:
2458         case 224:
2459         case 208:
2460                 block = "CB";
2461                 break;
2462         case 33:
2463         case 17:
2464         case 97:
2465         case 81:
2466         case 161:
2467         case 145:
2468         case 225:
2469         case 209:
2470                 block = "CB_FMASK";
2471                 break;
2472         case 34:
2473         case 18:
2474         case 98:
2475         case 82:
2476         case 162:
2477         case 146:
2478         case 226:
2479         case 210:
2480                 block = "CB_CMASK";
2481                 break;
2482         case 35:
2483         case 19:
2484         case 99:
2485         case 83:
2486         case 163:
2487         case 147:
2488         case 227:
2489         case 211:
2490                 block = "CB_IMMED";
2491                 break;
2492         case 36:
2493         case 20:
2494         case 100:
2495         case 84:
2496         case 164:
2497         case 148:
2498         case 228:
2499         case 212:
2500                 block = "DB";
2501                 break;
2502         case 37:
2503         case 21:
2504         case 101:
2505         case 85:
2506         case 165:
2507         case 149:
2508         case 229:
2509         case 213:
2510                 block = "DB_HTILE";
2511                 break;
2512         case 38:
2513         case 22:
2514         case 102:
2515         case 86:
2516         case 166:
2517         case 150:
2518         case 230:
2519         case 214:
2520                 block = "SX";
2521                 break;
2522         case 39:
2523         case 23:
2524         case 103:
2525         case 87:
2526         case 167:
2527         case 151:
2528         case 231:
2529         case 215:
2530                 block = "DB_STEN";
2531                 break;
2532         case 40:
2533         case 24:
2534         case 104:
2535         case 88:
2536         case 232:
2537         case 216:
2538         case 168:
2539         case 152:
2540                 block = "TC_TFETCH";
2541                 break;
2542         case 41:
2543         case 25:
2544         case 105:
2545         case 89:
2546         case 233:
2547         case 217:
2548         case 169:
2549         case 153:
2550                 block = "TC_VFETCH";
2551                 break;
2552         case 42:
2553         case 26:
2554         case 106:
2555         case 90:
2556         case 234:
2557         case 218:
2558         case 170:
2559         case 154:
2560                 block = "VC";
2561                 break;
2562         case 112:
2563                 block = "CP";
2564                 break;
2565         case 113:
2566         case 114:
2567                 block = "SH";
2568                 break;
2569         case 115:
2570                 block = "VGT";
2571                 break;
2572         case 178:
2573                 block = "IH";
2574                 break;
2575         case 51:
2576                 block = "RLC";
2577                 break;
2578         case 55:
2579                 block = "DMA";
2580                 break;
2581         case 56:
2582                 block = "HDP";
2583                 break;
2584         default:
2585                 block = "unknown";
2586                 break;
2587         }
2588
2589         printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2590                protections, vmid, addr,
2591                (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2592                block, mc_id);
2593 }
2594
2595 /**
2596  * cayman_vm_flush - vm flush using the CP
2597  *
2598  * @rdev: radeon_device pointer
      * @ring: radeon_ring structure holding ring information
      * @vm_id: vmid to flush
      * @pd_addr: page directory base address to program
2599  *
2600  * Update the page table base and flush the VM TLB
2601  * using the CP (cayman-si).
2602  */
2603 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
2604                      unsigned vm_id, uint64_t pd_addr)
2605 {
2606         radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
2607         radeon_ring_write(ring, pd_addr >> 12);
2608
2609         /* flush hdp cache */
2610         radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
2611         radeon_ring_write(ring, 0x1);
2612
2613         /* bits 0-7 are the VM contexts 0-7 */
2614         radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
2615         radeon_ring_write(ring, 1 << vm_id);
2616
2617         /* wait for the invalidate to complete */
2618         radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2619         radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
2620                                  WAIT_REG_MEM_ENGINE(0))); /* me */
2621         radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2622         radeon_ring_write(ring, 0);
2623         radeon_ring_write(ring, 0); /* ref */
2624         radeon_ring_write(ring, 0); /* mask */
2625         radeon_ring_write(ring, 0x20); /* poll interval */
2626
2627         /* sync PFP to ME, otherwise we might get invalid PFP reads */
2628         radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2629         radeon_ring_write(ring, 0x0);
2630 }
2631
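/*
 * Note: only the ECLK divider is programmed below; the evclk argument is
 * accepted but not currently used.
 */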
2632 int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
2633 {
2634         struct atom_clock_dividers dividers;
2635         int r, i;
2636
2637         r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2638                                            ecclk, false, &dividers);
2639         if (r)
2640                 return r;
2641
2642         for (i = 0; i < 100; i++) {
2643                 if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2644                         break;
2645                 mdelay(10);
2646         }
2647         if (i == 100)
2648                 return -ETIMEDOUT;
2649
2650         WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
2651
2652         for (i = 0; i < 100; i++) {
2653                 if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2654                         break;
2655                 mdelay(10);
2656         }
2657         if (i == 100)
2658                 return -ETIMEDOUT;
2659
2660         return 0;
2661 }