/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;
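
	/*
	 * Worked example (assuming PAGE_SIZE == 4096): idx 1030 is byte
	 * offset 4120, giving pg_idx = 1 and pg_offset = 24, i.e. the
	 * dword at index 6 of the second mapped page.
	 */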

	/* hit in one of the two cached kmap'ed pages? */
	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	/* miss: map in the page that holds this dword */
	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
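
/*
 * Illustrative usage of the ring API (a sketch, not taken from this
 * file; PACKET0() is the register-write packet macro used elsewhere in
 * the driver, and error handling is trimmed):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, val);
 *	radeon_ring_unlock_commit(rdev, ring);
 *
 * radeon_ring_lock() reserves space and takes the ring mutex,
 * radeon_ring_write() emits one dword, and unlock_commit() pads the
 * ring and bumps the hardware write pointer.
 */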

/*
 * IB (indirect buffer) pool.
 */
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free ibs which have been emitted */
	if (ib->fence && ib->fence->emitted) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align requested size up to the 256-byte allocation granularity */
	size = ALIGN(size, 256);
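	/* e.g. a request of 700 bytes is rounded up to 768 */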

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				(*ib)->is_const_ib = false;
				/* ibs are most likely to be allocated in a
				 * ring fashion, thus rdev->ib_pool.head_id
				 * should be the id of the oldest ib
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
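				/* note: this wrap relies on
				 * RADEON_IB_POOL_SIZE being a power of two */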
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* this should be a rare event, i.e. all ibs are scheduled but none
	 * has signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	/* an ib whose fence was never emitted will not be reclaimed by
	 * radeon_ib_try_free(), so release it here */
	if (tmp->fence && !tmp->fence->emitted) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the ib; should we report it? */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
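
/*
 * Sketch of the IB life cycle for callers of this file (illustrative
 * only, error handling omitted):
 *
 *	struct radeon_ib *ib;
 *
 *	radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64 * 4);
 *	... fill ib->ptr[] with command dwords, set ib->length_dw ...
 *	radeon_ib_schedule(rdev, ib);	(also emits ib->fence)
 *	radeon_ib_free(rdev, &ib);	(reclaimed once the fence signals)
 */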

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	struct radeon_sa_manager tmp;
	int i, r;

	r = radeon_sa_bo_manager_init(rdev, &tmp,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		/* pool is already initialized, drop the temporary manager */
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_sa_bo_manager_fini(rdev, &tmp);
		return 0;
	}

	rdev->ib_pool.sa_manager = tmp;
	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		/* rptr == wptr means the ring is empty, not full */
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
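
/*
 * Worked example of the free-size computation (assuming a 4 KiB ring,
 * i.e. 1024 dwords and ptr_mask = 1023): rptr = 100, wptr = 200 gives
 * (100 + 1024 - 200) & 1023 = 924 free dwords; rptr = 200, wptr = 100
 * gives (200 + 1024 - 100) & 1023 = 100 free dwords.
 */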

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		/* not enough room yet, wait for the next fence to retire */
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	/* read back to flush the write before the GPU fetches the new wptr */
	(void)RREG32(ring->wptr_reg);
}
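
/*
 * Padding example: with a 16-dword fetch granularity (align_mask = 15)
 * and wptr & 15 == 5, radeon_ring_commit() emits 11 NOPs so the write
 * pointer lands on a fetch boundary before the GPU sees it.
 */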

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* drop everything written since radeon_ring_lock() */
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
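	/* ring_size is in bytes and must be a power of two, so the dword
	 * index wraps with a simple mask; e.g. a 1 MiB ring gives
	 * ptr_mask = 0x3ffff */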
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* dump the in-flight dwords, starting at the read pointer */
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	if (rdev->family >= CHIP_CAYMAN)
		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
						ARRAY_SIZE(radeon_debugfs_ring_info_list));
	else
		/* pre-Cayman chips only expose the GFX ring */
		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}