/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"

#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
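/* Classic function-like min/max macros: each argument is evaluated
 * twice, so they must not be called with expressions that have side
 * effects. The call sites in this file only pass plain values.
 */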
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);

struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	u32			row_size;
	/* value we track */
	u32			nsamples;	/* unused */
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_attrib[12];
	u32			cb_color_cmask_slice[8];	/* unused */
	u32			cb_color_fmask_slice[8];	/* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;	/* unused */
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	struct radeon_bo	*vgt_strmout_bo[4];
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_slice;
	u32			db_depth_size;
	u32			db_z_info;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	u32			htile_offset;
	u32			htile_surface;
	struct radeon_bo	*htile_bo;
};
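/*
 * evergreen_cs_track mirrors the register state programmed through the
 * command stream; evergreen_cs_track_check() later consumes it to verify
 * that every buffer object bound through the stream is large enough and
 * correctly aligned before a draw or dispatch is allowed through.
 */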
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}

static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
	default:
		return ADDR_SURF_8_BANK;
	case 16:
		return ADDR_SURF_16_BANK;
	}
}
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0xfffffff;
		track->cb_color_slice_idx[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;

	track->db_depth_slice = 0xffffffff;
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}
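/* Unprogrammed state is deliberately initialized to all-ones offsets and
 * "dirty" flags: if userspace draws without first programming a register
 * group, the checks in evergreen_cs_track_check() see the 0xFFFFFFFF
 * defaults (or a NULL bo) and reject the command stream instead of
 * letting an unvalidated address through.
 */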
struct eg_surface {
	/* value gathered from cs */
	unsigned	nbx;
	unsigned	nby;
	unsigned	format;
	unsigned	mode;
	unsigned	nbanks;
	unsigned	bankw;
	unsigned	bankh;
	unsigned	tsplit;
	unsigned	mtilea;
	unsigned	nsamples;
	/* value computed from the above */
	unsigned	bpe;
	unsigned	layer_size;
	unsigned	palign;
	unsigned	halign;
	unsigned long	base_align;
};

static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
					  struct eg_surface *surf,
					  const char *prefix)
{
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = surf->bpe;
	surf->palign = 1;
	surf->halign = 1;
	return 0;
}
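/* For ARRAY_LINEAR_GENERAL there is no tiling constraint: the layer is a
 * plain nbx * nby block array, e.g. a 256x256 surface at 4 bytes per
 * element and 1 sample occupies 256 * 256 * 4 = 256 KiB per layer and
 * only needs element-size (4 byte) base alignment.
 */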
static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
						  struct eg_surface *surf,
						  const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = MAX(64, track->group_size / surf->bpe);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 1;
	if (surf->nbx & (palign - 1)) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	return 0;
}
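/* Worked example of the aligned-linear pitch rule: with a 512 byte tile
 * group and 4 bytes per element, palign = max(64, 512 / 4) = 128, so the
 * pitch (surf->nbx, in elements) must be a multiple of 128 and the base
 * address a multiple of the 512 byte group size.
 */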
static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 8;
	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
				 __func__, __LINE__, prefix, surf->nbx, palign,
				 track->group_size, surf->bpe, surf->nsamples);
		}
		return -EINVAL;
	}
	if ((surf->nby & (8 - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
				 __func__, __LINE__, prefix, surf->nby);
		}
		return -EINVAL;
	}
	return 0;
}
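/* A 1D thin tile is an 8x8 element block, so the height must be a
 * multiple of 8 and the pitch a multiple of
 * max(8, group_size / (8 * bpe * nsamples)) elements; e.g. a 512 byte
 * group, bpe 4 and 1 sample gives palign = max(8, 512 / 32) = 16.
 */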
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign, halign, tileb, slice_pt;
	unsigned mtile_pr, mtile_ps, mtileb;

	tileb = 64 * surf->bpe * surf->nsamples;
	slice_pt = 1;
	if (tileb > surf->tsplit) {
		slice_pt = tileb / surf->tsplit;
	}
	tileb = tileb / slice_pt;
	/* macro tile width & height */
	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
	mtileb = (palign / 8) * (halign / 8) * tileb;
	mtile_pr = surf->nbx / palign;
	mtile_ps = (mtile_pr * surf->nby) / halign;
	surf->layer_size = mtile_ps * mtileb * slice_pt;
	surf->base_align = (palign / 8) * (halign / 8) * tileb;
	surf->palign = palign;
	surf->halign = halign;

	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	if ((surf->nby & (halign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nby, halign);
		}
		return -EINVAL;
	}
	return 0;
}
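/* Illustrative macro-tile math (the actual values depend on the ASIC):
 * with bankw = 1, bankh = 1, npipes = 4, nbanks = 8 and mtilea = 2, a
 * macro tile is palign = 8 * 1 * 4 * 2 = 64 elements wide and
 * halign = 8 * 1 * 8 / 2 = 32 elements tall, so a 2D-tiled pitch must be
 * a multiple of 64 and the height a multiple of 32.
 */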
static int evergreen_surface_check(struct radeon_cs_parser *p,
				   struct eg_surface *surf,
				   const char *prefix)
{
	/* some common values are computed here */
	surf->bpe = r600_fmt_get_blocksize(surf->format);

	switch (surf->mode) {
	case ARRAY_LINEAR_GENERAL:
		return evergreen_surface_check_linear(p, surf, prefix);
	case ARRAY_LINEAR_ALIGNED:
		return evergreen_surface_check_linear_aligned(p, surf, prefix);
	case ARRAY_1D_TILED_THIN1:
		return evergreen_surface_check_1d(p, surf, prefix);
	case ARRAY_2D_TILED_THIN1:
		return evergreen_surface_check_2d(p, surf, prefix);
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}
}
static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
					      struct eg_surface *surf,
					      const char *prefix)
{
	switch (surf->mode) {
	case ARRAY_2D_TILED_THIN1:
		break;
	case ARRAY_LINEAR_GENERAL:
	case ARRAY_LINEAR_ALIGNED:
	case ARRAY_1D_TILED_THIN1:
		return 0;
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}

	switch (surf->nbanks) {
	case 0: surf->nbanks = 2; break;
	case 1: surf->nbanks = 4; break;
	case 2: surf->nbanks = 8; break;
	case 3: surf->nbanks = 16; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
			 __func__, __LINE__, prefix, surf->nbanks);
		return -EINVAL;
	}

	switch (surf->bankw) {
	case 0: surf->bankw = 1; break;
	case 1: surf->bankw = 2; break;
	case 2: surf->bankw = 4; break;
	case 3: surf->bankw = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
			 __func__, __LINE__, prefix, surf->bankw);
		return -EINVAL;
	}

	switch (surf->bankh) {
	case 0: surf->bankh = 1; break;
	case 1: surf->bankh = 2; break;
	case 2: surf->bankh = 4; break;
	case 3: surf->bankh = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
			 __func__, __LINE__, prefix, surf->bankh);
		return -EINVAL;
	}

	switch (surf->mtilea) {
	case 0: surf->mtilea = 1; break;
	case 1: surf->mtilea = 2; break;
	case 2: surf->mtilea = 4; break;
	case 3: surf->mtilea = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
			 __func__, __LINE__, prefix, surf->mtilea);
		return -EINVAL;
	}

	switch (surf->tsplit) {
	case 0: surf->tsplit = 64; break;
	case 1: surf->tsplit = 128; break;
	case 2: surf->tsplit = 256; break;
	case 3: surf->tsplit = 512; break;
	case 4: surf->tsplit = 1024; break;
	case 5: surf->tsplit = 2048; break;
	case 6: surf->tsplit = 4096; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
			 __func__, __LINE__, prefix, surf->tsplit);
		return -EINVAL;
	}
	return 0;
}
static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
	pitch = track->cb_color_pitch[id];
	slice = track->cb_color_slice[id];
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
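	/* CB_COLOR*_PITCH stores (pitch / 8) - 1 and CB_COLOR*_SLICE stores
	 * (pitch * height / 64) - 1, so e.g. a pitch field of 127 decodes to
	 * nbx = 128 * 8 = 1024 elements, and a slice field of 16383 then
	 * gives nby = (16384 * 64) / 1024 = 1024 rows.
	 */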
	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
	surf.nsamples = 1;

	if (!r600_fmt_is_valid_color(surf.format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
			 __func__, __LINE__, surf.format,
			 id, track->cb_color_info[id]);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "cb");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, "cb");
	if (r) {
		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, id, track->cb_color_pitch[id],
			 track->cb_color_slice[id], track->cb_color_attrib[id],
			 track->cb_color_info[id]);
		return r;
	}

	offset = track->cb_color_bo_offset[id] << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, id, offset, surf.base_align);
		return -EINVAL;
	}

	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
		/* old ddx is broken: it allocates the bo with w*h*bpp but
		 * programs the slice with ALIGN(h, 8); catch this and patch
		 * the command stream instead of wasting bo memory.
		 */
		volatile u32 *ib = p->ib.ptr;
		unsigned long tmp, nby, bsize, size, min = 0;

		/* find the height the ddx wants */
		if (surf.nby > 8) {
			min = surf.nby - 8;
		}
		bsize = radeon_bo_size(track->cb_color_bo[id]);
		tmp = track->cb_color_bo_offset[id] << 8;
		for (nby = surf.nby; nby > min; nby--) {
			size = nby * surf.nbx * surf.bpe * surf.nsamples;
			if ((tmp + size * mslice) <= bsize) {
				break;
			}
		}
		if (nby > min) {
			surf.nby = nby;
			slice = ((nby * surf.nbx) / 64) - 1;
			if (!evergreen_surface_check(p, &surf, "cb")) {
				/* check if this one works */
				tmp += surf.layer_size * mslice;
				if (tmp <= bsize) {
					ib[track->cb_color_slice_idx[id]] = slice;
					goto old_ddx_ok;
				}
			}
		}
		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
			 __func__, __LINE__, id, surf.layer_size,
			 track->cb_color_bo_offset[id] << 8, mslice,
			 radeon_bo_size(track->cb_color_bo[id]), slice);
		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
			 __func__, __LINE__, surf.nbx, surf.nby,
			 surf.mode, surf.bpe, surf.nsamples,
			 surf.bankw, surf.bankh,
			 surf.tsplit, surf.mtilea);
		return -EINVAL;
	}
old_ddx_ok:
	return 0;
}
static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
					     unsigned nbx, unsigned nby)
{
	struct evergreen_cs_track *track = p->track;
	unsigned long size;

	if (track->htile_bo == NULL) {
		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
			 __func__, __LINE__, track->db_z_info);
		return -EINVAL;
	}

	if (G_028ABC_LINEAR(track->htile_surface)) {
		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
		nbx = round_up(nbx, 16 * 8);
		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
		nby = round_up(nby, track->npipes * 8);
	} else {
		switch (track->npipes) {
		case 8:
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 64 * 8);
			break;
		case 4:
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 2:
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 1:
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 16 * 8);
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
				 __func__, __LINE__, track->npipes);
			return -EINVAL;
		}
	}
	/* compute number of htiles */
	nbx = nbx / 8;
	nby = nby / 8;
	size = nbx * nby * 4;
	size += track->htile_offset;

	if (size > radeon_bo_size(track->htile_bo)) {
		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
			 __func__, __LINE__, radeon_bo_size(track->htile_bo),
			 size, nbx, nby);
		return -EINVAL;
	}
	return 0;
}
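/* Each htile is one 32-bit word covering an 8x8 pixel block, hence the
 * division of the padded dimensions by 8 and the nbx * nby * 4 byte size
 * computation above.
 */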
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028044_FORMAT(track->db_s_info);
	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	if (surf.format != 1) {
		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	/* replace by color format so we can use the same code */
	surf.format = V_028C70_COLOR_8;

	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, NULL);
	if (r) {
		/* old userspace doesn't compute proper depth/stencil alignment;
		 * check the alignment against a bigger bytes-per-element value
		 * and only report an error if that one is wrong too.
		 */
		surf.format = V_028C70_COLOR_8_8_8_8;
		r = evergreen_surface_check(p, &surf, "stencil");
		if (r) {
			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
				 __func__, __LINE__, track->db_depth_size,
				 track->db_depth_slice, track->db_s_info, track->db_z_info);
			return r;
		}
	}

	offset = track->db_s_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_read_bo)) {
		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_read_offset << 8, mslice,
			 radeon_bo_size(track->db_s_read_bo));
		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_s_info, track->db_z_info);
		return -EINVAL;
	}

	offset = track->db_s_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_write_bo)) {
		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_write_offset << 8, mslice,
			 radeon_bo_size(track->db_s_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}
static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028040_FORMAT(track->db_z_info);
	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	switch (surf.format) {
	case V_028040_Z_16:
		surf.format = V_028C70_COLOR_16;
		break;
	case V_028040_Z_24:
	case V_028040_Z_32_FLOAT:
		surf.format = V_028C70_COLOR_8_8_8_8;
		break;
	default:
		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
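	/* Depth formats are folded onto color formats with the same bytes
	 * per element (Z_16 -> COLOR_16 is 2 bytes, Z_24/Z_32_FLOAT ->
	 * COLOR_8_8_8_8 is 4 bytes) so the shared surface checker can size
	 * and align the depth buffer exactly like a color buffer.
	 */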
	r = evergreen_surface_value_conv_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	r = evergreen_surface_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	offset = track->db_z_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_read_bo)) {
		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_read_offset << 8, mslice,
			 radeon_bo_size(track->db_z_read_bo));
		return -EINVAL;
	}

	offset = track->db_z_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_write_bo)) {
		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_write_offset << 8, mslice,
			 radeon_bo_size(track->db_z_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}
static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
					       struct radeon_bo *texture,
					       struct radeon_bo *mipmap,
					       unsigned idx)
{
	struct eg_surface surf;
	unsigned long toffset, moffset;
	unsigned dim, llevel, mslice, width, height, depth, i;
	u32 texdw[8];
	int r;

	texdw[0] = radeon_get_ib_value(p, idx + 0);
	texdw[1] = radeon_get_ib_value(p, idx + 1);
	texdw[2] = radeon_get_ib_value(p, idx + 2);
	texdw[3] = radeon_get_ib_value(p, idx + 3);
	texdw[4] = radeon_get_ib_value(p, idx + 4);
	texdw[5] = radeon_get_ib_value(p, idx + 5);
	texdw[6] = radeon_get_ib_value(p, idx + 6);
	texdw[7] = radeon_get_ib_value(p, idx + 7);
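	/* A SET_RESOURCE texture descriptor is eight dwords; the fields
	 * pulled apart below (dimension, sizes, array mode, bank geometry,
	 * format) are exactly what the surface checker needs to recompute
	 * the layout the hardware will assume when sampling.
	 */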
	dim = G_030000_DIM(texdw[0]);
	llevel = G_030014_LAST_LEVEL(texdw[5]);
	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
	surf.nsamples = 1;

	toffset = texdw[2] << 8;
	moffset = texdw[3] << 8;

	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	switch (dim) {
	case V_030000_SQ_TEX_DIM_1D:
	case V_030000_SQ_TEX_DIM_2D:
	case V_030000_SQ_TEX_DIM_CUBEMAP:
	case V_030000_SQ_TEX_DIM_1D_ARRAY:
	case V_030000_SQ_TEX_DIM_2D_ARRAY:
		depth = 1;
	case V_030000_SQ_TEX_DIM_3D:
		break;
	default:
		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
			 __func__, __LINE__, dim);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "texture");
	if (r) {
		return r;
	}

	/* align height */
	evergreen_surface_check(p, &surf, NULL);
	surf.nby = ALIGN(surf.nby, surf.halign);

	r = evergreen_surface_check(p, &surf, "texture");
	if (r) {
		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
			 texdw[5], texdw[6], texdw[7]);
		return r;
	}

	/* check texture size */
	if (toffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, toffset, surf.base_align);
		return -EINVAL;
	}
	if (moffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, moffset, surf.base_align);
		return -EINVAL;
	}
	if (dim == SQ_TEX_DIM_3D) {
		toffset += surf.layer_size * depth;
	} else {
		toffset += surf.layer_size * mslice;
	}
	if (toffset > radeon_bo_size(texture)) {
		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)texdw[2] << 8, mslice,
			 depth, radeon_bo_size(texture),
			 surf.nbx, surf.nby);
		return -EINVAL;
	}

	/* check mipmap size */
	for (i = 1; i <= llevel; i++) {
		unsigned w, h, d;

		w = r600_mip_minify(width, i);
		h = r600_mip_minify(height, i);
		d = r600_mip_minify(depth, i);
		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
		surf.nby = r600_fmt_get_nblocksy(surf.format, h);

		switch (surf.mode) {
		case ARRAY_2D_TILED_THIN1:
			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
				surf.mode = ARRAY_1D_TILED_THIN1;
				/* recompute alignment */
				evergreen_surface_check(p, &surf, NULL);
			}
		case ARRAY_LINEAR_GENERAL:
		case ARRAY_LINEAR_ALIGNED:
		case ARRAY_1D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
				 __func__, __LINE__, surf.mode);
			return -EINVAL;
		}

		surf.nbx = ALIGN(surf.nbx, surf.palign);
		surf.nby = ALIGN(surf.nby, surf.halign);

		r = evergreen_surface_check(p, &surf, "mipmap");
		if (r) {
			return r;
		}

		if (dim == SQ_TEX_DIM_3D) {
			moffset += surf.layer_size * d;
		} else {
			moffset += surf.layer_size * mslice;
		}
		if (moffset > radeon_bo_size(mipmap)) {
			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
				 "offset %ld, coffset %ld, max layer %d, depth %d, "
				 "bo size %ld) level0 (%d %d %d)\n",
				 __func__, __LINE__, i, surf.layer_size,
				 (unsigned long)texdw[3] << 8, moffset, mslice,
				 d, radeon_bo_size(mipmap),
				 width, height, depth);
			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
				 __func__, __LINE__, surf.nbx, surf.nby,
				 surf.mode, surf.bpe, surf.nsamples,
				 surf.bankw, surf.bankh,
				 surf.tsplit, surf.mtilea);
			return -EINVAL;
		}
	}

	return 0;
}
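/* Each mip level is minified with r600_mip_minify(), realigned, and
 * re-checked; a level that becomes smaller than one macro tile is first
 * demoted from 2D to 1D tiling, which mirrors how such small levels are
 * actually laid out.
 */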
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	unsigned tmp, i;
	int r;
	unsigned buffer_mask = 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_config) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_config & (1 << i)) {
				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
			}
		}

		for (i = 0; i < 4; i++) {
			if (buffer_mask & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						     (u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}
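	/* VGT_STRMOUT_CONFIG enables up to four streams (one bit each) and
	 * VGT_STRMOUT_BUFFER_CONFIG holds a 4-bit buffer mask per stream,
	 * so the first loop above ORs the per-stream nibbles into a single
	 * mask of the buffers that must be backed by a large-enough bo.
	 */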
	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* check cb */
				r = evergreen_cs_track_validate_cb(p, i);
				if (r) {
					return r;
				}
			}
		}
		track->cb_dirty = false;
	}

	if (track->db_dirty) {
		/* Check stencil buffer */
		if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_stencil(p);
			if (r)
				return r;
		}
		/* Check depth buffer */
		if (G_028800_Z_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_depth(p);
			if (r)
				return r;
		}
		track->db_dirty = false;
	}

	return 0;
}
/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 */
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	u32 header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
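/* CP packet headers follow the classic radeon layout decoded by the
 * macros above: bits 31:30 hold the packet type, bits 29:16 the dword
 * count minus one, and for type-0 packets bits 15:0 the starting
 * register dword offset; a type-2 packet is a single-dword filler,
 * hence count = -1.
 */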
/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 */
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
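/* Relocations ride in the command stream as PACKET3_NOP packets whose
 * payload is an index into the relocation chunk; with one reloc entry
 * per four dwords, idx / 4 selects the radeon_cs_reloc that carries the
 * bo and its validated GPU offset.
 */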
/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
	}
	return 0;
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
/**
 * evergreen_cs_check_reg() - check if a register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
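	/* The *_reg_safe_bm tables carry one bit per register dword:
	 * i = reg >> 7 picks the 32-register word and m the bit within it.
	 * A clear bit means the register is harmless and is accepted right
	 * away; a set bit routes the register into the switch below for
	 * special handling.
	 */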
	ib = p->ib.ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we will need to understand how it works better before we
	 * can perform a real security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get the value to populate the IB, don't remove */
		/* tmp = radeon_get_ib_value(p, idx);
		   ib[idx] = 0; */
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] &= ~Z_ARRAY_MODE(0xf);
			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= DB_TILE_SPLIT(tile_split) |
					   DB_BANK_WIDTH(bankw) |
					   DB_BANK_HEIGHT(bankh) |
					   DB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		track->db_dirty = true;
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_02805C_DB_DEPTH_SLICE:
		track->db_depth_slice = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
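	/* Color targets 0-7 live in full-size register blocks 0x3c bytes
	 * apart, while targets 8-11 use compact 0x1c-byte blocks, hence the
	 * two different index computations for the same cb_color_* state.
	 */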
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		/* 8x8 only */
		track->htile_surface = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 last_reg, m, i;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return true;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return true;
	}
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		unsigned tmp;
		u64 offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
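	/* GPU addresses carried in packets are up to 40 bits wide: the low
	 * 32 bits go in one dword and the upper 8 bits share the following
	 * dword, which is why the relocation paths here split the computed
	 * offset with upper_32_bits(offset) & 0xff.
	 */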
1982 case PACKET3_CONTEXT_CONTROL:
1983 if (pkt->count != 1) {
1984 DRM_ERROR("bad CONTEXT_CONTROL\n");
1988 case PACKET3_INDEX_TYPE:
1989 case PACKET3_NUM_INSTANCES:
1990 case PACKET3_CLEAR_STATE:
1992 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1996 case CAYMAN_PACKET3_DEALLOC_STATE:
1997 if (p->rdev->family < CHIP_CAYMAN) {
1998 DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
2002 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
2006 case PACKET3_INDEX_BASE:
2010 if (pkt->count != 1) {
2011 DRM_ERROR("bad INDEX_BASE\n");
2014 r = evergreen_cs_packet_next_reloc(p, &reloc);
2016 DRM_ERROR("bad INDEX_BASE\n");
2020 offset = reloc->lobj.gpu_offset +
2022 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2025 ib[idx+1] = upper_32_bits(offset) & 0xff;
2027 r = evergreen_cs_track_check(p);
2029 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2034 case PACKET3_DRAW_INDEX:
2037 if (pkt->count != 3) {
2038 DRM_ERROR("bad DRAW_INDEX\n");
2041 r = evergreen_cs_packet_next_reloc(p, &reloc);
2043 DRM_ERROR("bad DRAW_INDEX\n");
2047 offset = reloc->lobj.gpu_offset +
2049 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2052 ib[idx+1] = upper_32_bits(offset) & 0xff;
2054 r = evergreen_cs_track_check(p);
2056 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2061 case PACKET3_DRAW_INDEX_2:
2065 if (pkt->count != 4) {
2066 DRM_ERROR("bad DRAW_INDEX_2\n");
2069 r = evergreen_cs_packet_next_reloc(p, &reloc);
2071 DRM_ERROR("bad DRAW_INDEX_2\n");
2075 offset = reloc->lobj.gpu_offset +
2076 radeon_get_ib_value(p, idx+1) +
2077 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2080 ib[idx+2] = upper_32_bits(offset) & 0xff;
2082 r = evergreen_cs_track_check(p);
2084 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2089 case PACKET3_DRAW_INDEX_AUTO:
2090 if (pkt->count != 1) {
2091 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
2094 r = evergreen_cs_track_check(p);
2096 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2100 case PACKET3_DRAW_INDEX_MULTI_AUTO:
2101 if (pkt->count != 2) {
2102 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
2105 r = evergreen_cs_track_check(p);
2107 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2111 case PACKET3_DRAW_INDEX_IMMD:
2112 if (pkt->count < 2) {
2113 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
2116 r = evergreen_cs_track_check(p);
2118 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2122 case PACKET3_DRAW_INDEX_OFFSET:
2123 if (pkt->count != 2) {
2124 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
2127 r = evergreen_cs_track_check(p);
2129 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2133 case PACKET3_DRAW_INDEX_OFFSET_2:
2134 if (pkt->count != 3) {
2135 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
2138 r = evergreen_cs_track_check(p);
2140 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2144 case PACKET3_DISPATCH_DIRECT:
2145 if (pkt->count != 3) {
2146 DRM_ERROR("bad DISPATCH_DIRECT\n");
2149 r = evergreen_cs_track_check(p);
2151 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2155 case PACKET3_DISPATCH_INDIRECT:
2156 if (pkt->count != 1) {
2157 DRM_ERROR("bad DISPATCH_INDIRECT\n");
2160 r = evergreen_cs_packet_next_reloc(p, &reloc);
2162 DRM_ERROR("bad DISPATCH_INDIRECT\n");
2165 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
2166 r = evergreen_cs_track_check(p);
2168 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
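	/*
	 * The poll address must be dword aligned; the low two bits of the
	 * address dword are preserved above since the hardware apparently
	 * uses them as control bits (likely the swap function) rather than
	 * as part of the address.
	 */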
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
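	/*
	 * The sync base address appears to be expressed in 256-byte units,
	 * hence the gpu_offset >> 8 when patching it above.
	 */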
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
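	/*
	 * A zero-dword EVENT_WRITE carries no address payload; when one is
	 * present it is masked to 8-byte alignment (0xfffffff8).
	 */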
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
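	/*
	 * Unlike the plain address patches above, the high dword here keeps
	 * its upper 24 bits, which presumably carry the packet's control
	 * fields alongside the 8 address bits.
	 */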
	case PACKET3_EVENT_WRITE_EOS:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
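	/*
	 * The range check above only proves the window lies inside the
	 * config-reg aperture; each individual register is still vetted by
	 * evergreen_cs_check_reg() against the driver's safe-register
	 * tables.
	 */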
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 toffset, moffset;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
						unsigned bankw, bankh, mtaspect, tile_split;

						evergreen_tiling_fields(reloc->lobj.tiling_flags,
									&bankw, &bankh, &mtaspect,
									&tile_split);
						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
						ib[idx+1+(i*8)+7] |=
							TEX_BANK_WIDTH(bankw) |
							TEX_BANK_HEIGHT(bankh) |
							MACRO_TILE_ASPECT(mtaspect) |
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
				if (r)
					return r;
				ib[idx+1+(i*8)+2] += toffset;
				ib[idx+1+(i*8)+3] += moffset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
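	/*
	 * Each SET_RESOURCE entry is an 8-dword descriptor whose type field
	 * lives in dword 7. Texture resources consume two relocations (base
	 * and mip chain) while vertex buffers consume one; the address
	 * dwords are patched in place above.
	 */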
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
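	/*
	 * Sampler state is written as 3-dword groups, hence the
	 * pkt->count % 3 check; only the target range is validated since
	 * these packets carry no GPU addresses.
	 */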
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
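	/*
	 * Both paths read or write a single dword, so the bounds checks
	 * only require offset + 4 to fit within the relocated buffer.
	 */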
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
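/*
 * Anything not explicitly handled above is rejected, so the packet3
 * checker behaves as a whitelist rather than a blacklist.
 */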
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN)
			tmp = p->rdev->config.cayman.tile_config;
		else
			tmp = p->rdev->config.evergreen.tile_config;

		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}

		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}

		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}

		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}
		p->track = track;
	}

	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
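/*
 * With virtual memory on cayman and later, userspace command streams run
 * in their own GPU address space, so the VM variants below only need to
 * guard direct register writes; buffer addresses no longer have to be
 * relocated or bounds-checked by the kernel.
 */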
static bool evergreen_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
	switch (reg) {
	case GRBM_GFX_INDEX:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_COMPUTE_DIM_X:
	case VGT_COMPUTE_DIM_Y:
	case VGT_COMPUTE_DIM_Z:
	case VGT_COMPUTE_START_X:
	case VGT_COMPUTE_START_Y:
	case VGT_COMPUTE_START_Z:
	case VGT_COMPUTE_INDEX:
	case VGT_COMPUTE_THREAD_GROUP_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
	case SQ_DYN_GPR_SIMD_LOCK_EN:
	case SQ_CONFIG:
	case SQ_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
	case SQ_CONST_MEM_BASE:
	case SQ_STATIC_THREAD_MGMT_1:
	case SQ_STATIC_THREAD_MGMT_2:
	case SQ_STATIC_THREAD_MGMT_3:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
	case DB_DEBUG:
	case DB_DEBUG2:
	case DB_DEBUG3:
	case DB_DEBUG4:
	case DB_WATERMARKS:
	case TD_PS_BORDER_COLOR_INDEX:
	case TD_PS_BORDER_COLOR_RED:
	case TD_PS_BORDER_COLOR_GREEN:
	case TD_PS_BORDER_COLOR_BLUE:
	case TD_PS_BORDER_COLOR_ALPHA:
	case TD_VS_BORDER_COLOR_INDEX:
	case TD_VS_BORDER_COLOR_RED:
	case TD_VS_BORDER_COLOR_GREEN:
	case TD_VS_BORDER_COLOR_BLUE:
	case TD_VS_BORDER_COLOR_ALPHA:
	case TD_GS_BORDER_COLOR_INDEX:
	case TD_GS_BORDER_COLOR_RED:
	case TD_GS_BORDER_COLOR_GREEN:
	case TD_GS_BORDER_COLOR_BLUE:
	case TD_GS_BORDER_COLOR_ALPHA:
	case TD_HS_BORDER_COLOR_INDEX:
	case TD_HS_BORDER_COLOR_RED:
	case TD_HS_BORDER_COLOR_GREEN:
	case TD_HS_BORDER_COLOR_BLUE:
	case TD_HS_BORDER_COLOR_ALPHA:
	case TD_LS_BORDER_COLOR_INDEX:
	case TD_LS_BORDER_COLOR_RED:
	case TD_LS_BORDER_COLOR_GREEN:
	case TD_LS_BORDER_COLOR_BLUE:
	case TD_LS_BORDER_COLOR_ALPHA:
	case TD_CS_BORDER_COLOR_INDEX:
	case TD_CS_BORDER_COLOR_RED:
	case TD_CS_BORDER_COLOR_GREEN:
	case TD_CS_BORDER_COLOR_BLUE:
	case TD_CS_BORDER_COLOR_ALPHA:
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}
static int evergreen_vm_packet3_check(struct radeon_device *rdev,
				      u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_MODE_CONTROL:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_DRAW_INDEX_OFFSET:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDEX:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_BOOL_CONST:
	case PACKET3_SET_LOOP_CONST:
	case PACKET3_SET_RESOURCE:
	case PACKET3_SET_SAMPLER:
	case PACKET3_SET_CTL_CONST:
	case PACKET3_SET_RESOURCE_OFFSET:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_RESOURCE_INDIRECT:
	case CAYMAN_PACKET3_DEALLOC_STATE:
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
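/*
 * PM4 packet headers encode the type in bits [31:30], the count field
 * (payload dwords minus one) in bits [29:16] and, for type-3 packets,
 * the opcode in bits [15:8]; a packet therefore spans pkt.count + 2
 * dwords including its header, which is why the parsers advance by
 * that amount.
 */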
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case PACKET_TYPE2:
			idx += 1;
			break;
		case PACKET_TYPE3:
			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}