/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon_asic.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

#define FIRMWARE_R100 "radeon/R100_cp.bin"
#define FIRMWARE_R200 "radeon/R200_cp.bin"
#define FIRMWARE_R300 "radeon/R300_cp.bin"
#define FIRMWARE_R420 "radeon/R420_cp.bin"
#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
#define FIRMWARE_R520 "radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 */
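/*
 * r100_set_power_state - switch the GPU to the requested power state.
 *
 * Nothing to do when the requested clock mode already matches the current
 * one. Otherwise the engine clock (sclk) and, when the ASIC exposes a
 * hook for it, the memory clock (mclk) are reprogrammed; each reclock is
 * synchronized with vblank and bracketed by the in-vblank debug checks.
 * The current power-state/clock-mode pointers are updated at the end.
 */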
void r100_set_power_state(struct radeon_device *rdev)
        /* if *_clock_mode are the same, *_power_state are as well */
        if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)

        DRM_INFO("Setting: e: %d m: %d p: %d\n",
                 rdev->pm.requested_clock_mode->sclk,
                 rdev->pm.requested_clock_mode->mclk,
                 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);

        /* set engine clock */
        radeon_sync_with_vblank(rdev);
        radeon_pm_debug_check_in_vbl(rdev, false);
        radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
        radeon_pm_debug_check_in_vbl(rdev, true);

        /* set memory clock */
        if (rdev->asic->set_memory_clock) {
                radeon_sync_with_vblank(rdev);
                radeon_pm_debug_check_in_vbl(rdev, false);
                radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
                radeon_pm_debug_check_in_vbl(rdev, true);

        rdev->pm.current_power_state = rdev->pm.requested_power_state;
        rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
bool r100_gui_idle(struct radeon_device *rdev)
        if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)

/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
        bool connected = false;

        if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)

        if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)

void r100_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd)
        bool connected = r100_hpd_sense(rdev, hpd);

        tmp = RREG32(RADEON_FP_GEN_CNTL);
        if (connected)
                tmp &= ~RADEON_FP_DETECT_INT_POL;
        else
                tmp |= RADEON_FP_DETECT_INT_POL;
        WREG32(RADEON_FP_GEN_CNTL, tmp);

        tmp = RREG32(RADEON_FP2_GEN_CNTL);
        if (connected)
                tmp &= ~RADEON_FP2_DETECT_INT_POL;
        else
                tmp |= RADEON_FP2_DETECT_INT_POL;
        WREG32(RADEON_FP2_GEN_CNTL, tmp);

void r100_hpd_init(struct radeon_device *rdev)
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                switch (radeon_connector->hpd.hpd) {
                        rdev->irq.hpd[0] = true;
                        rdev->irq.hpd[1] = true;

        if (rdev->irq.installed)

void r100_hpd_fini(struct radeon_device *rdev)
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                switch (radeon_connector->hpd.hpd) {
                        rdev->irq.hpd[0] = false;
                        rdev->irq.hpd[1] = false;
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
        /* TODO: can we do something here? */
        /* It seems the hw only caches one entry, so we should discard
         * this entry; otherwise, if the first GPU GART read hits it,
         * the access could end up at the wrong address. */
int r100_pci_gart_init(struct radeon_device *rdev)

        if (rdev->gart.table.ram.ptr) {
                WARN(1, "R100 PCI GART already initialized.\n");

        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);

        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
        rdev->asic->gart_set_page = &r100_pci_gart_set_page;
        return radeon_gart_table_ram_alloc(rdev);

/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)

        /* Enable bus mastering */
        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
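/*
 * r100_pci_gart_enable - turn on PCI GART address translation.
 *
 * Programs the AIC block: out-of-range requests are discarded,
 * AIC_LO_ADDR/AIC_HI_ADDR bound the translated window to the GTT range,
 * AIC_PT_BASE points at the page table, and translation is enabled
 * before flushing the (single-entry) TLB and marking the GART as ready.
 */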
int r100_pci_gart_enable(struct radeon_device *rdev)

        radeon_gart_restore(rdev);
        /* discard memory requests outside of the configured range */
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32(RADEON_AIC_CNTL, tmp);
        /* set address range for PCI address translation */
        WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
        WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
        /* set PCI GART page-table base address */
        WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
        WREG32(RADEON_AIC_CNTL, tmp);
        r100_pci_gart_tlb_flush(rdev);
        rdev->gart.ready = true;

void r100_pci_gart_disable(struct radeon_device *rdev)

        /* discard memory requests outside of the configured range */
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
        WREG32(RADEON_AIC_LO_ADDR, 0);
        WREG32(RADEON_AIC_HI_ADDR, 0);
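/*
 * r100_pci_gart_set_page - write one GART page-table entry.
 *
 * The table lives in system RAM and holds one little-endian 32-bit DMA
 * address per GPU page, so only the lower 32 bits of addr are used.
 */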
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
        if (i < 0 || i >= rdev->gart.num_gpu_pages) {

        rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));

void r100_pci_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
        r100_pci_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
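/*
 * r100_irq_set - program GEN_INT_CNTL from the rdev->irq state.
 *
 * Builds the interrupt enable mask (SW interrupt, GUI idle, per-CRTC
 * vblank, FP/FP2 hotplug detect) and writes it to the hardware; every
 * source stays masked when no interrupt handler has been installed.
 */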
int r100_irq_set(struct radeon_device *rdev)

        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);

        if (rdev->irq.sw_int) {
                tmp |= RADEON_SW_INT_ENABLE;
        if (rdev->irq.gui_idle) {
                tmp |= RADEON_GUI_IDLE_MASK;
        if (rdev->irq.crtc_vblank_int[0]) {
                tmp |= RADEON_CRTC_VBLANK_MASK;
        if (rdev->irq.crtc_vblank_int[1]) {
                tmp |= RADEON_CRTC2_VBLANK_MASK;
        if (rdev->irq.hpd[0]) {
                tmp |= RADEON_FP_DETECT_MASK;
        if (rdev->irq.hpd[1]) {
                tmp |= RADEON_FP2_DETECT_MASK;
        WREG32(RADEON_GEN_INT_CNTL, tmp);

void r100_irq_disable(struct radeon_device *rdev)

        WREG32(R_000040_GEN_INT_CNTL, 0);
        /* Wait and acknowledge irq */
        tmp = RREG32(R_000044_GEN_INT_STATUS);
        WREG32(R_000044_GEN_INT_STATUS, tmp);
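/*
 * r100_irq_ack - read, acknowledge and filter the interrupt status.
 *
 * Writing the status value back to GEN_INT_STATUS clears the pending
 * sources; the GUI idle bit is only folded into the returned mask while
 * a GUI idle interrupt is requested and not yet acked, because that
 * status bit stays asserted permanently.
 */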
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
        uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
        uint32_t irq_mask = RADEON_SW_INT_TEST |
                RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
                RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

        /* the interrupt works, but the status bit is permanently asserted */
        if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
                if (!rdev->irq.gui_idle_acked)
                        irq_mask |= RADEON_GUI_IDLE_STAT;

        WREG32(RADEON_GEN_INT_STATUS, irqs);

        return irqs & irq_mask;

int r100_irq_process(struct radeon_device *rdev)
        uint32_t status, msi_rearm;
        bool queue_hotplug = false;

        /* reset gui idle ack. the status bit is broken */
        rdev->irq.gui_idle_acked = false;

        status = r100_irq_ack(rdev);

        if (rdev->shutdown) {

        if (status & RADEON_SW_INT_TEST) {
                radeon_fence_process(rdev);
        /* gui idle interrupt */
        if (status & RADEON_GUI_IDLE_STAT) {
                rdev->irq.gui_idle_acked = true;
                rdev->pm.gui_idle = true;
                wake_up(&rdev->irq.idle_queue);
        /* Vertical blank interrupts */
        if (status & RADEON_CRTC_VBLANK_STAT) {
                drm_handle_vblank(rdev->ddev, 0);
                rdev->pm.vblank_sync = true;
                wake_up(&rdev->irq.vblank_queue);
        if (status & RADEON_CRTC2_VBLANK_STAT) {
                drm_handle_vblank(rdev->ddev, 1);
                rdev->pm.vblank_sync = true;
                wake_up(&rdev->irq.vblank_queue);
        if (status & RADEON_FP_DETECT_STAT) {
                queue_hotplug = true;
        if (status & RADEON_FP2_DETECT_STAT) {
                queue_hotplug = true;
        status = r100_irq_ack(rdev);

        /* reset gui idle ack. the status bit is broken */
        rdev->irq.gui_idle_acked = false;

        if (queue_hotplug)
                queue_work(rdev->wq, &rdev->hotplug_work);
        if (rdev->msi_enabled) {
                switch (rdev->family) {
                        msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
                        WREG32(RADEON_AIC_CNTL, msi_rearm);
                        WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
                        msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
                        WREG32(RADEON_MSI_REARM_EN, msi_rearm);
                        WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
        if (crtc == 0)
                return RREG32(RADEON_CRTC_CRNT_FRAME);
        else
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are the IB scheduler and buffer moves) */
void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
        /* We have to make sure that caches are flushed before
         * the CPU might read something back from VRAM. */
        radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
        radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
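/*
 * r100_wb_init - set up the scratch/writeback page.
 *
 * Allocates (on first use), pins and kmaps one GTT page, then points
 * SCRATCH_ADDR and the CP read-pointer writeback address at it and
 * unmasks the scratch registers so the CP can write fence values and
 * its rptr copy back to system RAM.
 */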
int r100_wb_init(struct radeon_device *rdev)

        if (rdev->wb.wb_obj == NULL) {
                r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                        dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);

        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
        if (unlikely(r != 0))
        r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
                radeon_bo_unreserve(rdev->wb.wb_obj);

        r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
        radeon_bo_unreserve(rdev->wb.wb_obj);
                dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);

        WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
               S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
        WREG32(R_000770_SCRATCH_UMSK, 0xff);

void r100_wb_disable(struct radeon_device *rdev)
        WREG32(R_000770_SCRATCH_UMSK, 0);

void r100_wb_fini(struct radeon_device *rdev)

        r100_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
                if (unlikely(r != 0)) {
                        dev_err(rdev->dev, "(%d) can't finish WB\n", r);

                radeon_bo_kunmap(rdev->wb.wb_obj);
                radeon_bo_unpin(rdev->wb.wb_obj);
                radeon_bo_unreserve(rdev->wb.wb_obj);
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb_obj = NULL;
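/*
 * r100_copy_blit - copy pages using the 2D blitter.
 *
 * Expresses the copy as a series of BITBLT_MULTI operations where every
 * page is one line of the blit, so up to 8191 pages (the maximum blit
 * height) move per loop iteration. The stream ends with a destination
 * cache flush, an engine-idle wait and a fence emit.
 */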
int r100_copy_blit(struct radeon_device *rdev,
                   struct radeon_fence *fence)
        uint32_t stride_bytes = PAGE_SIZE;
        uint32_t stride_pixels;

        /* radeon is limited to a 16k stride */
        stride_bytes &= 0x3fff;
        /* radeon pitch is in units of 64 bytes */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
        num_loops = DIV_ROUND_UP(num_pages, 8191);
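        /* For example, with 4 KB pages: stride_bytes = 4096 (well under
         * the 16 KB limit), pitch = 4096 / 64 = 64 and stride_pixels =
         * 4096 / 4 = 1024 32-bit pixels per line; copying 20000 pages
         * would then take DIV_ROUND_UP(20000, 8191) = 3 blit loops.
         */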
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
        r = radeon_ring_lock(rdev, ndw);
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);

        while (num_pages > 0) {
                cur_pages = num_pages;
                if (cur_pages > 8191) {

                num_pages -= cur_pages;

                /* pages run in the Y direction (height); the page width
                 * runs in the X direction (width) */
                radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
                radeon_ring_write(rdev,
                                  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_SRC_CLIPPING |
                                  RADEON_GMC_DST_CLIPPING |
                                  RADEON_GMC_BRUSH_NONE |
                                  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
                                  RADEON_GMC_SRC_DATATYPE_COLOR |
                                  RADEON_DP_SRC_SOURCE_MEMORY |
                                  RADEON_GMC_CLR_CMP_CNTL_DIS |
                                  RADEON_GMC_WR_MSK_DIS);
                radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
                radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, 0);
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, num_pages);
                radeon_ring_write(rdev, num_pages);
                radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));

        radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_HOST_IDLECLEAN |
                          RADEON_WAIT_DMA_GUI_IDLE);

        r = radeon_fence_emit(rdev, fence);

        radeon_ring_unlock_commit(rdev);
static int r100_cp_wait_for_idle(struct radeon_device *rdev)

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(R_000E40_RBBM_STATUS);
                if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {

void r100_ring_start(struct radeon_device *rdev)

        r = radeon_ring_lock(rdev, 2);

        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_unlock_commit(rdev);
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
        struct platform_device *pdev;
        const char *fw_name = NULL;

        pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
                printk(KERN_ERR "radeon_cp: Failed to register firmware\n");

        if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
            (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
            (rdev->family == CHIP_RS200)) {
                DRM_INFO("Loading R100 Microcode\n");
                fw_name = FIRMWARE_R100;
        } else if ((rdev->family == CHIP_R200) ||
                   (rdev->family == CHIP_RV250) ||
                   (rdev->family == CHIP_RV280) ||
                   (rdev->family == CHIP_RS300)) {
                DRM_INFO("Loading R200 Microcode\n");
                fw_name = FIRMWARE_R200;
        } else if ((rdev->family == CHIP_R300) ||
                   (rdev->family == CHIP_R350) ||
                   (rdev->family == CHIP_RV350) ||
                   (rdev->family == CHIP_RV380) ||
                   (rdev->family == CHIP_RS400) ||
                   (rdev->family == CHIP_RS480)) {
                DRM_INFO("Loading R300 Microcode\n");
                fw_name = FIRMWARE_R300;
        } else if ((rdev->family == CHIP_R420) ||
                   (rdev->family == CHIP_R423) ||
                   (rdev->family == CHIP_RV410)) {
                DRM_INFO("Loading R400 Microcode\n");
                fw_name = FIRMWARE_R420;
        } else if ((rdev->family == CHIP_RS690) ||
                   (rdev->family == CHIP_RS740)) {
                DRM_INFO("Loading RS690/RS740 Microcode\n");
                fw_name = FIRMWARE_RS690;
        } else if (rdev->family == CHIP_RS600) {
                DRM_INFO("Loading RS600 Microcode\n");
                fw_name = FIRMWARE_RS600;
        } else if ((rdev->family == CHIP_RV515) ||
                   (rdev->family == CHIP_R520) ||
                   (rdev->family == CHIP_RV530) ||
                   (rdev->family == CHIP_R580) ||
                   (rdev->family == CHIP_RV560) ||
                   (rdev->family == CHIP_RV570)) {
                DRM_INFO("Loading R500 Microcode\n");
                fw_name = FIRMWARE_R520;

        err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
        platform_device_unregister(pdev);
                printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
        } else if (rdev->me_fw->size % 8) {
                printk(KERN_ERR
                       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
                       rdev->me_fw->size, fw_name);
                release_firmware(rdev->me_fw);

static void r100_cp_load_microcode(struct radeon_device *rdev)
        const __be32 *fw_data;

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");

        size = rdev->me_fw->size / 4;
        fw_data = (const __be32 *)&rdev->me_fw->data[0];
        WREG32(RADEON_CP_ME_RAM_ADDR, 0);
        for (i = 0; i < size; i += 2) {
                WREG32(RADEON_CP_ME_RAM_DATAH,
                       be32_to_cpup(&fw_data[i]));
                WREG32(RADEON_CP_ME_RAM_DATAL,
                       be32_to_cpup(&fw_data[i + 1]));
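/*
 * r100_cp_init - load the CP microcode and bring up the ring buffer.
 *
 * Rounds the requested ring size to a power of two, loads the microcode,
 * programs the ring buffer registers (size, block size, max fetch, read
 * and write pointers) and the command stream queue mode, then starts the
 * ring and runs a ring test before declaring the CP ready.
 */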
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        unsigned pre_write_timer;
        unsigned pre_write_limit;
        unsigned indirect2_start;
        unsigned indirect1_start;

        if (r100_debugfs_cp_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for CP !\n");

        r = r100_cp_init_microcode(rdev);
                DRM_ERROR("Failed to load firmware!\n");

        /* Align ring size */
        rb_bufsz = drm_order(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
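        /* For example, a requested 1 MB ring gives
         * rb_bufsz = drm_order(1048576 / 8) = 17 and a programmed size of
         * (1 << 18) * 4 = 1 MB again; non-power-of-two requests are
         * rounded up to the next power of two.
         */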
        r100_cp_load_microcode(rdev);
        r = radeon_ring_init(rdev, ring_size);

        /* Each time the CP reads 1024 bytes (16 dword/quadword), update
         * the rptr copy in system RAM */

        /* the CP will read 128 bytes at a time (4 dwords) */

        rdev->cp.align_mask = 16 - 1;
        /* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
        pre_write_timer = 64;
        /* Force a CP_RB_WPTR write if it is written more than once before
         * the last read of CP_RB_RPTR */

        /* Set up the cp cache like this (cache size is 96 dwords):
         * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
         * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
         * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
         * The idea being that most of the GPU commands will go through the
         * indirect1 buffer, so it gets the bigger cache.
         */
        indirect2_start = 80;
        indirect1_start = 16;
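        /* Resulting CSQ cache layout (96 dwords total):
         *   ring      dwords  0..15  (16 dwords)
         *   indirect1 dwords 16..79  (64 dwords)
         *   indirect2 dwords 80..95  (16 dwords)
         */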
        WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
        tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
               REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
               REG_SET(RADEON_MAX_FETCH, max_fetch) |
               RADEON_RB_NO_UPDATE);

        tmp |= RADEON_BUF_SWAP_32BIT;

        WREG32(RADEON_CP_RB_CNTL, tmp);

        /* Set ring address */
        DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
        WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
        WREG32(RADEON_CP_RB_CNTL, tmp);

        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
        /* protect against crazy HW on resume */
        rdev->cp.wptr &= rdev->cp.ptr_mask;
        /* Set cp mode to bus mastering & enable cp */
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
               REG_SET(RADEON_INDIRECT1_START, indirect1_start));
        WREG32(0x744, 0x00004D4D);
        WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
        radeon_ring_start(rdev);
        r = radeon_ring_test(rdev);
                DRM_ERROR("radeon: cp isn't working (%d).\n", r);

        rdev->cp.ready = true;
void r100_cp_fini(struct radeon_device *rdev)
        if (r100_cp_wait_for_idle(rdev)) {
                DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");

        r100_cp_disable(rdev);
        radeon_ring_fini(rdev);
        DRM_INFO("radeon: cp finalized\n");

void r100_cp_disable(struct radeon_device *rdev)

        rdev->cp.ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");

void r100_cp_commit(struct radeon_device *rdev)
        WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
        (void)RREG32(RADEON_CP_RB_WPTR);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          const unsigned *auth, unsigned n,
                          radeon_packet0_check_t check)

        /* Check that the register falls into the register range
         * determined by the number of entries (n) in the
         * safe register bitmap.
         */
        if (pkt->one_reg_wr) {
                if ((reg >> 7) > n) {

                if (((reg + (pkt->count << 2)) >> 7) > n) {

        for (i = 0; i <= pkt->count; i++, idx++) {
                m = 1 << ((reg >> 2) & 31);
                        r = check(p, pkt, idx, reg);

                if (pkt->one_reg_wr) {
                        if (!(auth[j] & m)) {

void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt)
        volatile uint32_t *ib;

        for (i = 0; i <= (pkt->count + 1); i++, idx++) {
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
/**
 * r100_cs_packet_parse() - parse a cp packet and point the ib index to the next packet
 * @parser: parser structure holding the parsing context.
 * @pkt: where to store the packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet type is unknown.
 */
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
        struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);

        header = radeon_get_ib_value(p, idx);
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
                pkt->reg = CP_PACKET0_GET_REG(header);
                pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
                pkt->opcode = CP_PACKET3_GET_OPCODE(header);
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);

        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
/**
 * r100_cs_packet_next_vline() - parse a userspace VLINE packet
 * @parser: parser structure holding the parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        struct radeon_cs_packet p3reloc, waitreloc;
        uint32_t header, h_idx, reg;
        volatile uint32_t *ib;

        /* parse the wait until */
        r = r100_cs_packet_parse(p, &waitreloc, p->idx);

        /* check it's a wait until and only 1 count */
        if (waitreloc.reg != RADEON_WAIT_UNTIL ||
            waitreloc.count != 0) {
                DRM_ERROR("vline wait had illegal wait until segment\n");

        if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
                DRM_ERROR("vline wait had illegal wait until\n");

        /* jump over the NOP */
        r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);

        p->idx += waitreloc.count + 2;
        p->idx += p3reloc.count + 2;

        header = radeon_get_ib_value(p, h_idx);
        crtc_id = radeon_get_ib_value(p, h_idx + 5);
        reg = CP_PACKET0_GET_REG(header);
        mutex_lock(&p->rdev->ddev->mode_config.mutex);
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
                DRM_ERROR("cannot find crtc %d\n", crtc_id);

        crtc = obj_to_crtc(obj);
        radeon_crtc = to_radeon_crtc(crtc);
        crtc_id = radeon_crtc->crtc_id;

        if (!crtc->enabled) {
                /* if the CRTC isn't enabled - we need to nop out the wait until */
                ib[h_idx + 2] = PACKET2(0);
                ib[h_idx + 3] = PACKET2(0);
        } else if (crtc_id == 1) {
                case AVIVO_D1MODE_VLINE_START_END:
                        header &= ~R300_CP_PACKET0_REG_MASK;
                        header |= AVIVO_D2MODE_VLINE_START_END >> 2;
                case RADEON_CRTC_GUI_TRIG_VLINE:
                        header &= ~R300_CP_PACKET0_REG_MASK;
                        header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
                        DRM_ERROR("unknown crtc reloc\n");

                ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;

        mutex_unlock(&p->rdev->ddev->mode_config.mutex);
/**
 * r100_cs_packet_next_reloc() - parse the next packet, which should be a reloc packet3
 * @parser: parser structure holding the parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align the start offset on)
 * @reloc: reloc information
 *
 * Checks that the next packet is a relocation packet3, does bo validation
 * and computes the GPU offset using the provided start.
 */
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc)
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");

        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r100_cs_packet_parse(p, &p3reloc, p->idx);

        p->idx += p3reloc.count + 2;
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                r100_cs_dump_packet(p, &p3reloc);

        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                r100_cs_dump_packet(p, &p3reloc);

        /* FIXME: we assume reloc size is 4 dwords */
        *cs_reloc = p->relocs_ptr[(idx / 4)];
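/*
 * r100_get_vtx_size - compute the vertex size in dwords from SE_VTX_FMT.
 *
 * Walks the vertex format bits in spec order and accumulates the number
 * of dwords each enabled component contributes; the 3-bit field at
 * bit 15 directly encodes a variable dword count.
 */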
static int r100_get_vtx_size(uint32_t vtx_fmt)

        /* ordered according to bits in spec */
        if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
        if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
        if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
        if (vtx_fmt & (0x7 << 15))
                vtx_size += (vtx_fmt >> 15) & 0x7;
        if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
        if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
        if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
        if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
        if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
        if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;

        track = (struct r100_cs_track *)p->track;
        idx_value = radeon_get_ib_value(p, idx);

        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

        /* FIXME: only allow PACKET3 blit? easier to check for out of
         * range access */
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);

        case RADEON_RB3D_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        case RADEON_RB3D_COLOROFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                track->cb[0].robj = reloc->robj;
                track->cb[0].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        case RADEON_PP_TXOFFSET_0:
        case RADEON_PP_TXOFFSET_1:
        case RADEON_PP_TXOFFSET_2:
                i = (reg - RADEON_PP_TXOFFSET_0) / 24;
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
        case RADEON_PP_CUBIC_OFFSET_T0_0:
        case RADEON_PP_CUBIC_OFFSET_T0_1:
        case RADEON_PP_CUBIC_OFFSET_T0_2:
        case RADEON_PP_CUBIC_OFFSET_T0_3:
        case RADEON_PP_CUBIC_OFFSET_T0_4:
                i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                track->textures[0].cube_info[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[0].cube_info[i].robj = reloc->robj;
        case RADEON_PP_CUBIC_OFFSET_T1_0:
        case RADEON_PP_CUBIC_OFFSET_T1_1:
        case RADEON_PP_CUBIC_OFFSET_T1_2:
        case RADEON_PP_CUBIC_OFFSET_T1_3:
        case RADEON_PP_CUBIC_OFFSET_T1_4:
                i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                track->textures[1].cube_info[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[1].cube_info[i].robj = reloc->robj;
        case RADEON_PP_CUBIC_OFFSET_T2_0:
        case RADEON_PP_CUBIC_OFFSET_T2_1:
        case RADEON_PP_CUBIC_OFFSET_T2_2:
        case RADEON_PP_CUBIC_OFFSET_T2_3:
        case RADEON_PP_CUBIC_OFFSET_T2_4:
                i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                track->textures[2].cube_info[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[2].cube_info[i].robj = reloc->robj;
        case RADEON_RE_WIDTH_HEIGHT:
                track->maxy = ((idx_value >> 16) & 0x7FF);
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= RADEON_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

                tmp = idx_value & ~(0x7 << 16);

                track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
        case RADEON_RB3D_DEPTHPITCH:
                track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
        case RADEON_RB3D_CNTL:
                switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
                        track->cb[0].cpp = 1;
                        track->cb[0].cpp = 2;
                        track->cb[0].cpp = 4;
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));

                track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
        case RADEON_RB3D_ZSTENCILCNTL:
                switch (idx_value & 0xf) {

        case RADEON_RB3D_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        r100_cs_dump_packet(p, pkt);

                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        case RADEON_PP_CNTL:
                uint32_t temp = idx_value >> 4;
                for (i = 0; i < track->num_texture; i++)
                        track->textures[i].enabled = !!(temp & (1 << i));
        case RADEON_SE_VF_CNTL:
                track->vap_vf_cntl = idx_value;
        case RADEON_SE_VTX_FMT:
                track->vtx_size = r100_get_vtx_size(idx_value);
        case RADEON_PP_TEX_SIZE_0:
        case RADEON_PP_TEX_SIZE_1:
        case RADEON_PP_TEX_SIZE_2:
                i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
                track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
                track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
        case RADEON_PP_TEX_PITCH_0:
        case RADEON_PP_TEX_PITCH_1:
        case RADEON_PP_TEX_PITCH_2:
                i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
                track->textures[i].pitch = idx_value + 32;
        case RADEON_PP_TXFILTER_0:
        case RADEON_PP_TXFILTER_1:
        case RADEON_PP_TXFILTER_2:
                i = (reg - RADEON_PP_TXFILTER_0) / 24;
                track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
                                                 >> RADEON_MAX_MIP_LEVEL_SHIFT);
                tmp = (idx_value >> 23) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_w = false;
                tmp = (idx_value >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
        case RADEON_PP_TXFORMAT_0:
        case RADEON_PP_TXFORMAT_1:
        case RADEON_PP_TXFORMAT_2:
                i = (reg - RADEON_PP_TXFORMAT_0) / 24;
                if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
                        track->textures[i].use_pitch = 1;
                } else {
                        track->textures[i].use_pitch = 0;
                        track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
                        track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);

                if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
                        track->textures[i].tex_coord_type = 2;
                switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
                case RADEON_TXFORMAT_I8:
                case RADEON_TXFORMAT_RGB332:
                case RADEON_TXFORMAT_Y8:
                        track->textures[i].cpp = 1;
                case RADEON_TXFORMAT_AI88:
                case RADEON_TXFORMAT_ARGB1555:
                case RADEON_TXFORMAT_RGB565:
                case RADEON_TXFORMAT_ARGB4444:
                case RADEON_TXFORMAT_VYUY422:
                case RADEON_TXFORMAT_YVYU422:
                case RADEON_TXFORMAT_SHADOW16:
                case RADEON_TXFORMAT_LDUDV655:
                case RADEON_TXFORMAT_DUDV88:
                        track->textures[i].cpp = 2;
                case RADEON_TXFORMAT_ARGB8888:
                case RADEON_TXFORMAT_RGBA8888:
                case RADEON_TXFORMAT_SHADOW32:
                case RADEON_TXFORMAT_LDUDUV8888:
                        track->textures[i].cpp = 4;
                case RADEON_TXFORMAT_DXT1:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
                case RADEON_TXFORMAT_DXT23:
                case RADEON_TXFORMAT_DXT45:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT35;

                track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
                track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
        case RADEON_PP_CUBIC_FACES_0:
        case RADEON_PP_CUBIC_FACES_1:
        case RADEON_PP_CUBIC_FACES_2:
                i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
                for (face = 0; face < 4; face++) {
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
                        track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);

                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_bo *robj)

        value = radeon_get_ib_value(p, idx + 2);
        if ((value + 1) > radeon_bo_size(robj)) {
                DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                          "(need %u have %lu) !\n",
                          value + 1,
                          radeon_bo_size(robj));

static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;

        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                r = r100_packet3_load_vbpntr(p, pkt, idx);

        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);

                ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);

                /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);

                ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
                track->num_arrays = 1;
                track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

                track->arrays[0].robj = reloc->robj;
                track->arrays[0].esize = track->vtx_size;

                track->max_indx = radeon_get_ib_value(p, idx+1);

                track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);

        case PACKET3_3D_DRAW_IMMD:
                if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");

                track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);

                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_IMMD_2:
                if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");

                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);

                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);

                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);

                /* triggers drawing using indices to vertex buffer */
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);

                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);

                /* triggers drawing using indices to vertex buffer */
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
int r100_cs_parse(struct radeon_cs_parser *p)
        struct radeon_cs_packet pkt;
        struct r100_cs_track *track;

        track = kzalloc(sizeof(*track), GFP_KERNEL);
        r100_cs_track_clear(p->rdev, track);

        r = r100_cs_packet_parse(p, &pkt, p->idx);

        p->idx += pkt.count + 2;

        if (p->rdev->family >= CHIP_R200)
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r200_packet0_check);
        else
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r100_packet0_check);

        r = r100_packet3_check(p, &pkt);

                DRM_ERROR("Unknown packet type %d !\n",

        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
                rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;

        if (rdev->family == CHIP_RV100 ||
            rdev->family == CHIP_RS100 ||
            rdev->family == CHIP_RS200) {
                rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
        uint32_t crtc_gen_cntl, tmp;

        crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
        if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
            !(crtc_gen_cntl & RADEON_CRTC_EN)) {

        /* Clear the CRTC_VBLANK_SAVE bit */
        WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_CRTC_STATUS);
                if (tmp & RADEON_CRTC_VBLANK_SAVE) {

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
        uint32_t crtc2_gen_cntl, tmp;

        crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
        if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
            !(crtc2_gen_cntl & RADEON_CRTC2_EN))

        /* Clear the CRTC_VBLANK_SAVE bit */
        WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_CRTC2_STATUS);
                if (tmp & RADEON_CRTC2_VBLANK_SAVE) {

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;

int r100_gui_wait_for_idle(struct radeon_device *rdev)

        if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
                printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
                       " Bad things might happen.\n");

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & RADEON_RBBM_ACTIVE)) {

int r100_mc_wait_for_idle(struct radeon_device *rdev)

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(RADEON_MC_STATUS);
                if (tmp & RADEON_MC_IDLE) {
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
        lockup->last_cp_rptr = cp->rptr;
        lockup->last_jiffies = jiffies;

/**
 * r100_gpu_cp_is_lockup() - check whether the CP is locked up by recording information
 * @rdev: radeon device structure
 * @lockup: r100_gpu_lockup structure holding the CP lockup tracking information
 * @cp: radeon_cp structure holding CP information
 *
 * We don't need to initialize the lockup tracking information, since either
 * the CP rptr will have changed or jiffies will have wrapped around, and
 * both of those force (re)initialization of the lockup tracking information.
 *
 * A possible false positive is a call after a long while with last_cp_rptr
 * equal to the current CP rptr; unlikely, but it can happen. To avoid it,
 * if the time elapsed since the last call is longer than the reset window
 * (3 seconds below), we return false and update the tracking information.
 * Because of this, the caller must call r100_gpu_cp_is_lockup several times
 * within that window for a lockup to be reported; the fencing code should
 * be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so
 * we don't get a false positive when the CP simply has nothing to do.
 */
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
        unsigned long cjiffies, elapsed;

        if (!time_after(cjiffies, lockup->last_jiffies)) {
                /* likely a jiffies wrap around */
                lockup->last_cp_rptr = cp->rptr;
                lockup->last_jiffies = jiffies;

        if (cp->rptr != lockup->last_cp_rptr) {
                /* CP is still working, no lockup */
                lockup->last_cp_rptr = cp->rptr;
                lockup->last_jiffies = jiffies;

        elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
        if (elapsed >= 3000) {
                /* very likely the improbable case where the current rptr
                 * equals the rptr recorded a while ago; this is more
                 * likely a false positive, so update the tracking
                 * information and force a re-check at a later point
                 */
                lockup->last_cp_rptr = cp->rptr;
                lockup->last_jiffies = jiffies;

        if (elapsed >= 1000) {
                dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);

        /* give a chance to the GPU ... */
bool r100_gpu_is_lockup(struct radeon_device *rdev)

        rbbm_status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
                r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);

        /* force CP activity */
        r = radeon_ring_lock(rdev, 2);

        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_unlock_commit(rdev);

        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);

void r100_bm_disable(struct radeon_device *rdev)

        /* disable bus mastering */
        tmp = RREG32(R_000030_BUS_CNTL);
        WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
        WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
        WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
        tmp = RREG32(RADEON_BUS_CNTL);
        pci_read_config_word(rdev->pdev, 0x4, (u16 *)&tmp);
        pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
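/*
 * r100_asic_reset - soft reset of the GPU engines.
 *
 * Stops the MC, stalls the CP and zeroes its ring pointers, saves PCI
 * state and disables bus mastering, then pulses RBBM_SOFT_RESET first
 * for the SE/RE/PP/RB engines and then for the CP. PCI state and bus
 * mastering are restored afterwards, and RBBM_STATUS decides whether
 * the reset actually succeeded.
 */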
int r100_asic_reset(struct radeon_device *rdev)
        struct r100_mc_save save;

        r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {

        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);

        WREG32(RADEON_CP_CSQ_CNTL, 0);
        tmp = RREG32(RADEON_CP_RB_CNTL);
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
        WREG32(RADEON_CP_RB_CNTL, tmp);
        /* save PCI state */
        pci_save_state(rdev->pdev);
        /* disable bus mastering */
        r100_bm_disable(rdev);
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
                                         S_0000F0_SOFT_RESET_RE(1) |
                                         S_0000F0_SOFT_RESET_PP(1) |
                                         S_0000F0_SOFT_RESET_RB(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);

        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);

        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);

        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);

        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);

        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* restore PCI & busmastering */
        pci_restore_state(rdev->pdev);
        r100_enable_bm(rdev);
        /* Check if GPU is idle */
        if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
            G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
                rdev->gpu_lockup = true;

        r100_mc_resume(rdev, &save);
        dev_info(rdev->dev, "GPU reset succeeded\n");
void r100_set_common_regs(struct radeon_device *rdev)
        struct drm_device *dev = rdev->ddev;
        bool force_dac2 = false;

        /* set these so they don't interfere with anything */
        WREG32(RADEON_OV0_SCALE_CNTL, 0);
        WREG32(RADEON_SUBPIC_CNTL, 0);
        WREG32(RADEON_VIPH_CONTROL, 0);
        WREG32(RADEON_I2C_CNTL_1, 0);
        WREG32(RADEON_DVI_I2C_CNTL_1, 0);
        WREG32(RADEON_CAP0_TRIG_CNTL, 0);
        WREG32(RADEON_CAP1_TRIG_CNTL, 0);

        /* always set up dac2 on rn50 and some rv100 as lots
         * of servers seem to wire it up to a VGA port but
         * don't report it in the bios connector table
         */
        switch (dev->pdev->device) {

                /* DELL triple head servers */
                if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
                    ((dev->pdev->subsystem_device == 0x016c) ||
                     (dev->pdev->subsystem_device == 0x016d) ||
                     (dev->pdev->subsystem_device == 0x016e) ||
                     (dev->pdev->subsystem_device == 0x016f) ||
                     (dev->pdev->subsystem_device == 0x0170) ||
                     (dev->pdev->subsystem_device == 0x017d) ||
                     (dev->pdev->subsystem_device == 0x017e) ||
                     (dev->pdev->subsystem_device == 0x0183) ||
                     (dev->pdev->subsystem_device == 0x018a) ||
                     (dev->pdev->subsystem_device == 0x019a)))

                u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
                u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
                u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);

                /* For CRT on DAC2, don't turn it on if the BIOS didn't
                 * enable it, even if it's detected.
                 */

                /* force it to crtc0 */
                dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
                dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
                disp_hw_debug |= RADEON_CRT2_DISP1_SEL;

                /* set up the TV DAC */
                tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
                                 RADEON_TV_DAC_STD_MASK |
                                 RADEON_TV_DAC_RDACPD |
                                 RADEON_TV_DAC_GDACPD |
                                 RADEON_TV_DAC_BDACPD |
                                 RADEON_TV_DAC_BGADJ_MASK |
                                 RADEON_TV_DAC_DACADJ_MASK);
                tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
                                RADEON_TV_DAC_NHOLD |
                                RADEON_TV_DAC_STD_PS2 |

                WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
                WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
                WREG32(RADEON_DAC_CNTL2, dac2_cntl);

        /* switch PM block to ACPI mode */
        tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
        tmp &= ~RADEON_PM_MODE_SEL;
        WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
static void r100_vram_get_type(struct radeon_device *rdev)

        rdev->mc.vram_is_ddr = false;
        if (rdev->flags & RADEON_IS_IGP)
                rdev->mc.vram_is_ddr = true;
        else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
                rdev->mc.vram_is_ddr = true;
        if ((rdev->family == CHIP_RV100) ||
            (rdev->family == CHIP_RS100) ||
            (rdev->family == CHIP_RS200)) {
                tmp = RREG32(RADEON_MEM_CNTL);
                if (tmp & RV100_HALF_MODE) {
                        rdev->mc.vram_width = 32;
                } else {
                        rdev->mc.vram_width = 64;
                if (rdev->flags & RADEON_SINGLE_CRTC) {
                        rdev->mc.vram_width /= 4;
                        rdev->mc.vram_is_ddr = true;
        } else if (rdev->family <= CHIP_RV280) {
                tmp = RREG32(RADEON_MEM_CNTL);
                if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
                        rdev->mc.vram_width = 128;
                } else {
                        rdev->mc.vram_width = 64;
        } else {
                rdev->mc.vram_width = 128;
static u32 r100_get_accessible_vram(struct radeon_device *rdev)

        aper_size = RREG32(RADEON_CONFIG_APER_SIZE);

        /* Set HDP_APER_CNTL only on cards that are known not to be broken,
         * that is, those that have the 2nd generation multifunction PCI interface
         */
        if (rdev->family == CHIP_RV280 ||
            rdev->family >= CHIP_RV350) {
                WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
                         ~RADEON_HDP_APER_CNTL);
                DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
                return aper_size * 2;

        /* Older cards have all sorts of funny issues to deal with. First
         * check if it's a multifunction card by reading the PCI config
         * header type... Limit those to one aperture size.
         */
        pci_read_config_byte(rdev->pdev, 0xe, &byte);
                DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
                DRM_INFO("Limiting VRAM to one aperture\n");

        /* Single function older card. We read HDP_APER_CNTL to see how the
         * BIOS has set it up. We don't write this as it's broken on some
         * ASICs, but we expect the BIOS to have done the right thing (might
         * be too optimistic...)
         */
        if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
                return aper_size * 2;
void r100_vram_init_sizes(struct radeon_device *rdev)
        u64 config_aper_size;

        /* work out accessible VRAM */
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
        rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
        /* FIXME we don't use the second aperture yet when we could use it */
        if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
                rdev->mc.visible_vram_size = rdev->mc.aper_size;
        config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
        if (rdev->flags & RADEON_IS_IGP) {
                /* read NB_TOM to get the amount of ram stolen for the GPU */
                tom = RREG32(RADEON_NB_TOM);
                rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
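                /* NB_TOM encodes the stolen range in 64 KB units: bits
                 * 15:0 hold the bottom and bits 31:16 the top. As a
                 * purely illustrative (hypothetical) value, NB_TOM =
                 * 0x0fff0c00 would give (0x0fff - 0x0c00 + 1) << 16 =
                 * 64 MB of stolen VRAM.
                 */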
                WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
                rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        } else {
                rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
                /* Some production boards of m6 will report 0
                 * if it's 8 MB
                 */
                if (rdev->mc.real_vram_size == 0) {
                        rdev->mc.real_vram_size = 8192 * 1024;
                        WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);

                /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
                 * Novell bug 204882, along with lots of Ubuntu ones
                 */
                if (config_aper_size > rdev->mc.real_vram_size)
                        rdev->mc.mc_vram_size = config_aper_size;
                else
                        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;

void r100_vga_set_state(struct radeon_device *rdev, bool state)

        temp = RREG32(RADEON_CONFIG_CNTL);
        if (state == false) {

        WREG32(RADEON_CONFIG_CNTL, temp);
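/*
 * r100_mc_init - work out the VRAM/GTT layout in the GPU address space.
 *
 * Detects the VRAM type and sizes, bases VRAM at the PCI aperture (or,
 * on IGPs, at the bottom of the stolen range taken from NB_TOM), and
 * places the GTT for non-AGP configurations before refreshing the
 * bandwidth information.
 */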
void r100_mc_init(struct radeon_device *rdev)

        r100_vram_get_type(rdev);
        r100_vram_init_sizes(rdev);
        base = rdev->mc.aper_base;
        if (rdev->flags & RADEON_IS_IGP)
                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
/*
 * Indirect register accessors
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
        if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {

        (void)RREG32(RADEON_CLOCK_CNTL_DATA);
        (void)RREG32(RADEON_CRTC_GEN_CNTL);

static void r100_pll_errata_after_data(struct radeon_device *rdev)
        /* This workaround is necessary on RV100, RS100 and RS200 chips,
         * or the chip could hang on a subsequent access
         */
        if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {

        /* This function is required to work around a hardware bug in some
         * (all?) revisions of the R300. This workaround should be called
         * after every CLOCK_CNTL_INDEX register access. If not, register
         * reads afterward may not be correct.
         */
        if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {

                save = RREG32(RADEON_CLOCK_CNTL_INDEX);
                tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
                WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
                tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
                WREG32(RADEON_CLOCK_CNTL_INDEX, save);
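/*
 * PLL registers are reached indirectly: the register index goes into
 * CLOCK_CNTL_INDEX (with PLL_WR_EN set for writes) and the payload moves
 * through CLOCK_CNTL_DATA, with the errata helpers above invoked after
 * each step for the chips that need dummy reads or delays.
 */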
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)

        WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
        r100_pll_errata_after_index(rdev);
        data = RREG32(RADEON_CLOCK_CNTL_DATA);
        r100_pll_errata_after_data(rdev);

void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
        WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
        r100_pll_errata_after_index(rdev);
        WREG32(RADEON_CLOCK_CNTL_DATA, v);
        r100_pll_errata_after_data(rdev);

void r100_set_safe_registers(struct radeon_device *rdev)
        if (ASIC_IS_RN50(rdev)) {
                rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
                rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
        } else if (rdev->family < CHIP_R200) {
                rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
                rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
        } else {
                r200_set_safe_registers(rdev);
2208 #if defined(CONFIG_DEBUG_FS)
2209 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2211 struct drm_info_node *node = (struct drm_info_node *) m->private;
2212 struct drm_device *dev = node->minor->dev;
2213 struct radeon_device *rdev = dev->dev_private;
2214 uint32_t reg, value;
2217 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2218 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2219 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2220 for (i = 0; i < 64; i++) {
2221 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2222 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2223 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2224 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2225 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2230 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2232 struct drm_info_node *node = (struct drm_info_node *) m->private;
2233 struct drm_device *dev = node->minor->dev;
2234 struct radeon_device *rdev = dev->dev_private;
2236 unsigned count, i, j;
2238 radeon_ring_free_size(rdev);
2239 rdp = RREG32(RADEON_CP_RB_RPTR);
2240 wdp = RREG32(RADEON_CP_RB_WPTR);
2241 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2242 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2243 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2244 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2245 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2246 seq_printf(m, "%u dwords in ring\n", count);
2247 for (j = 0; j <= count; j++) {
2248 i = (rdp + j) & rdev->cp.ptr_mask;
2249 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2255 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2257 struct drm_info_node *node = (struct drm_info_node *) m->private;
2258 struct drm_device *dev = node->minor->dev;
2259 struct radeon_device *rdev = dev->dev_private;
2260 uint32_t csq_stat, csq2_stat, tmp;
2261 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2264 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2265 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2266 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2267 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2268 r_rptr = (csq_stat >> 0) & 0x3ff;
2269 r_wptr = (csq_stat >> 10) & 0x3ff;
2270 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2271 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2272 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2273 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2274 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2275 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2276 seq_printf(m, "Ring rptr %u\n", r_rptr);
2277 seq_printf(m, "Ring wptr %u\n", r_wptr);
2278 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2279 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2280 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2281 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2282 /* FIXME: 0, 128, 640 depend on the fifo setup, see cp_init_kms:
2283 * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
2284 seq_printf(m, "Ring fifo:\n");
2285 for (i = 0; i < 256; i++) {
2286 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2287 tmp = RREG32(RADEON_CP_CSQ_DATA);
2288 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2290 seq_printf(m, "Indirect1 fifo:\n");
2291 for (i = 256; i <= 512; i++) {
2292 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2293 tmp = RREG32(RADEON_CP_CSQ_DATA);
2294 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2296 seq_printf(m, "Indirect2 fifo:\n");
2297 for (i = 640; i < ib1_wptr; i++) {
2298 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2299 tmp = RREG32(RADEON_CP_CSQ_DATA);
2300 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2305 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2307 struct drm_info_node *node = (struct drm_info_node *) m->private;
2308 struct drm_device *dev = node->minor->dev;
2309 struct radeon_device *rdev = dev->dev_private;
2312 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2313 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2314 tmp = RREG32(RADEON_MC_FB_LOCATION);
2315 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2316 tmp = RREG32(RADEON_BUS_CNTL);
2317 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2318 tmp = RREG32(RADEON_MC_AGP_LOCATION);
2319 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2320 tmp = RREG32(RADEON_AGP_BASE);
2321 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2322 tmp = RREG32(RADEON_HOST_PATH_CNTL);
2323 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2324 tmp = RREG32(0x01D0);
2325 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2326 tmp = RREG32(RADEON_AIC_LO_ADDR);
2327 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2328 tmp = RREG32(RADEON_AIC_HI_ADDR);
2329 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2330 tmp = RREG32(0x01E4);
2331 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2335 static struct drm_info_list r100_debugfs_rbbm_list[] = {
2336 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2339 static struct drm_info_list r100_debugfs_cp_list[] = {
2340 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2341 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2344 static struct drm_info_list r100_debugfs_mc_info_list[] = {
2345 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2349 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2351 #if defined(CONFIG_DEBUG_FS)
2352 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2358 int r100_debugfs_cp_init(struct radeon_device *rdev)
2360 #if defined(CONFIG_DEBUG_FS)
2361 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2367 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2369 #if defined(CONFIG_DEBUG_FS)
2370 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
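/* Sketch: how an init path might register all three debugfs groups defined
 * above. The helper name is hypothetical; compare r100_debugfs() further
 * below, which registers only the MC group and merely warns on failure.
 */
static void __maybe_unused r100_debugfs_register_all(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev))
DRM_ERROR("Failed to register rbbm debugfs file\n");
if (r100_debugfs_cp_init(rdev))
DRM_ERROR("Failed to register cp debugfs files\n");
if (r100_debugfs_mc_info_init(rdev))
DRM_ERROR("Failed to register mc debugfs file\n");
}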
2376 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2377 uint32_t tiling_flags, uint32_t pitch,
2378 uint32_t offset, uint32_t obj_size)
2380 int surf_index = reg * 16;
2383 /* r100/r200 divide by 16 */
2384 if (rdev->family < CHIP_R300)
2389 if (rdev->family <= CHIP_RS200) {
2390 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2391 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2392 flags |= RADEON_SURF_TILE_COLOR_BOTH;
2393 if (tiling_flags & RADEON_TILING_MACRO)
2394 flags |= RADEON_SURF_TILE_COLOR_MACRO;
2395 } else if (rdev->family <= CHIP_RV280) {
2396 if (tiling_flags & (RADEON_TILING_MACRO))
2397 flags |= R200_SURF_TILE_COLOR_MACRO;
2398 if (tiling_flags & RADEON_TILING_MICRO)
2399 flags |= R200_SURF_TILE_COLOR_MICRO;
2401 if (tiling_flags & RADEON_TILING_MACRO)
2402 flags |= R300_SURF_TILE_MACRO;
2403 if (tiling_flags & RADEON_TILING_MICRO)
2404 flags |= R300_SURF_TILE_MICRO;
2407 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
2408 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
2409 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
2410 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
2412 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2413 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2414 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2415 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2419 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2421 int surf_index = reg * 16;
2422 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
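/* Minimal usage sketch with made-up values (not driver code): program and
 * later release surface slot 0 for a macro+micro tiled 1024x768 32bpp
 * buffer placed at VRAM offset 0.
 */
static void __maybe_unused r100_surface_reg_example(struct radeon_device *rdev)
{
const uint32_t pitch = 1024 * 4; /* bytes per scanline */
const uint32_t size = pitch * 768; /* whole buffer in bytes */

r100_set_surface_reg(rdev, 0,
RADEON_TILING_MACRO | RADEON_TILING_MICRO,
pitch, 0, size);
/* ... render to / scan out from the surface ... */
r100_clear_surface_reg(rdev, 0);
}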
2425 void r100_bandwidth_update(struct radeon_device *rdev)
2427 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2428 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2429 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2430 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2431 fixed20_12 memtcas_ff[8] = {
2440 fixed20_12 memtcas_rs480_ff[8] = {
2450 fixed20_12 memtcas2_ff[8] = {
2460 fixed20_12 memtrbs[8] = {
2470 fixed20_12 memtrbs_r4xx[8] = {
2480 fixed20_12 min_mem_eff;
2481 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2482 fixed20_12 cur_latency_mclk, cur_latency_sclk;
2483 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2484 disp_drain_rate2, read_return_rate;
2485 fixed20_12 time_disp1_drop_priority;
2487 int cur_size = 16; /* in octawords */
2488 int critical_point = 0, critical_point2;
2490 int stop_req, max_stop_req;
2491 struct drm_display_mode *mode1 = NULL;
2492 struct drm_display_mode *mode2 = NULL;
2493 uint32_t pixel_bytes1 = 0;
2494 uint32_t pixel_bytes2 = 0;
2496 radeon_update_display_priority(rdev);
2498 if (rdev->mode_info.crtcs[0]->base.enabled) {
2499 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2500 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2502 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2503 if (rdev->mode_info.crtcs[1]->base.enabled) {
2504 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2505 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2509 min_mem_eff.full = rfixed_const_8(0);
2511 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2512 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2513 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2514 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2515 /* check crtc enables */
2517 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2519 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2520 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2524 /* determine if there is enough bw for the current mode */
2526 sclk_ff = rdev->pm.sclk;
2527 mclk_ff = rdev->pm.mclk;
2529 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2530 temp_ff.full = rfixed_const(temp);
2531 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
2535 peak_disp_bw.full = 0;
2537 temp_ff.full = rfixed_const(1000);
2538 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
2539 pix_clk.full = rfixed_div(pix_clk, temp_ff);
2540 temp_ff.full = rfixed_const(pixel_bytes1);
2541 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
2544 temp_ff.full = rfixed_const(1000);
2545 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
2546 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
2547 temp_ff.full = rfixed_const(pixel_bytes2);
2548 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
2551 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
2552 if (peak_disp_bw.full >= mem_bw.full) {
2553 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2554 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
2557 /* Get the memory timing values from the MEM_TIMING_CNTL register. */
2558 temp = RREG32(RADEON_MEM_TIMING_CNTL);
2559 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2560 mem_trcd = ((temp >> 2) & 0x3) + 1;
2561 mem_trp = ((temp & 0x3)) + 1;
2562 mem_tras = ((temp & 0x70) >> 4) + 1;
2563 } else if (rdev->family == CHIP_R300 ||
2564 rdev->family == CHIP_R350) { /* r300, r350 */
2565 mem_trcd = (temp & 0x7) + 1;
2566 mem_trp = ((temp >> 8) & 0x7) + 1;
2567 mem_tras = ((temp >> 11) & 0xf) + 4;
2568 } else if (rdev->family == CHIP_RV350 ||
2569 rdev->family <= CHIP_RV380) {
2571 mem_trcd = (temp & 0x7) + 3;
2572 mem_trp = ((temp >> 8) & 0x7) + 3;
2573 mem_tras = ((temp >> 11) & 0xf) + 6;
2574 } else if (rdev->family == CHIP_R420 ||
2575 rdev->family == CHIP_R423 ||
2576 rdev->family == CHIP_RV410) {
2578 mem_trcd = (temp & 0xf) + 3;
2581 mem_trp = ((temp >> 8) & 0xf) + 3;
2584 mem_tras = ((temp >> 12) & 0x1f) + 6;
2587 } else { /* RV200, R200 */
2588 mem_trcd = (temp & 0x7) + 1;
2589 mem_trp = ((temp >> 8) & 0x7) + 1;
2590 mem_tras = ((temp >> 12) & 0xf) + 4;
2593 trcd_ff.full = rfixed_const(mem_trcd);
2594 trp_ff.full = rfixed_const(mem_trp);
2595 tras_ff.full = rfixed_const(mem_tras);
2597 /* Get values from the MEM_SDRAM_MODE_REG register, converting its contents. */
2598 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2599 data = (temp & (7 << 20)) >> 20;
2600 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2601 if (rdev->family == CHIP_RS480) /* apparently not the RS400 */
2602 tcas_ff = memtcas_rs480_ff[data];
2604 tcas_ff = memtcas_ff[data];
2606 tcas_ff = memtcas2_ff[data];
2608 if (rdev->family == CHIP_RS400 ||
2609 rdev->family == CHIP_RS480) {
2610 /* extra CAS latency is stored in bits 23-25: 0-4 clocks */
2611 data = (temp >> 23) & 0x7;
2613 tcas_ff.full += rfixed_const(data);
2616 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2617 /* on the R300, Tcas is included in Trbs. */
2619 temp = RREG32(RADEON_MEM_CNTL);
2620 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2622 if (R300_MEM_USE_CD_CH_ONLY & temp) {
2623 temp = RREG32(R300_MC_IND_INDEX);
2624 temp &= ~R300_MC_IND_ADDR_MASK;
2625 temp |= R300_MC_READ_CNTL_CD_mcind;
2626 WREG32(R300_MC_IND_INDEX, temp);
2627 temp = RREG32(R300_MC_IND_DATA);
2628 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2630 temp = RREG32(R300_MC_READ_CNTL_AB);
2631 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2634 temp = RREG32(R300_MC_READ_CNTL_AB);
2635 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2637 if (rdev->family == CHIP_RV410 ||
2638 rdev->family == CHIP_R420 ||
2639 rdev->family == CHIP_R423)
2640 trbs_ff = memtrbs_r4xx[data];
2642 trbs_ff = memtrbs[data];
2643 tcas_ff.full += trbs_ff.full;
2646 sclk_eff_ff.full = sclk_ff.full;
2648 if (rdev->flags & RADEON_IS_AGP) {
2649 fixed20_12 agpmode_ff;
2650 agpmode_ff.full = rfixed_const(radeon_agpmode);
2651 temp_ff.full = rfixed_const_666(16);
2652 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
2654 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2656 if (ASIC_IS_R300(rdev)) {
2657 sclk_delay_ff.full = rfixed_const(250);
2659 if ((rdev->family == CHIP_RV100) ||
2660 rdev->flags & RADEON_IS_IGP) {
2661 if (rdev->mc.vram_is_ddr)
2662 sclk_delay_ff.full = rfixed_const(41);
2664 sclk_delay_ff.full = rfixed_const(33);
2666 if (rdev->mc.vram_width == 128)
2667 sclk_delay_ff.full = rfixed_const(57);
2669 sclk_delay_ff.full = rfixed_const(41);
2673 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
2675 if (rdev->mc.vram_is_ddr) {
2676 if (rdev->mc.vram_width == 32) {
2677 k1.full = rfixed_const(40);
2680 k1.full = rfixed_const(20);
2684 k1.full = rfixed_const(40);
2688 temp_ff.full = rfixed_const(2);
2689 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
2690 temp_ff.full = rfixed_const(c);
2691 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
2692 temp_ff.full = rfixed_const(4);
2693 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
2694 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
2695 mc_latency_mclk.full += k1.full;
2697 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
2698 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
2701 /* HW cursor time, assuming the worst case of a full size colour cursor. */
2703 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2704 temp_ff.full += trcd_ff.full;
2705 if (temp_ff.full < tras_ff.full)
2706 temp_ff.full = tras_ff.full;
2707 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
2709 temp_ff.full = rfixed_const(cur_size);
2710 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
2712 /* Find the total latency for the display data. */
2714 disp_latency_overhead.full = rfixed_const(8);
2715 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2716 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2717 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2719 if (mc_latency_mclk.full > mc_latency_sclk.full)
2720 disp_latency.full = mc_latency_mclk.full;
2722 disp_latency.full = mc_latency_sclk.full;
2724 /* set up the max GRPH_STOP_REQ default value */
2725 if (ASIC_IS_RV100(rdev))
2726 max_stop_req = 0x5c;
2728 max_stop_req = 0x7c;
2732 /* Set the GRPH_BUFFER_CNTL register using h/w defined optimal values.
2733 * GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] */
2735 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2737 if (stop_req > max_stop_req)
2738 stop_req = max_stop_req;
2741 /* Find the drain rate of the display buffer. */
2743 temp_ff.full = rfixed_const((16/pixel_bytes1));
2744 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
2747 /* Find the critical point of the display buffer. */
2749 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
2750 crit_point_ff.full += rfixed_const_half(0);
2752 critical_point = rfixed_trunc(crit_point_ff);
2754 if (rdev->disp_priority == 2) {
2759 /* The critical point should never be above max_stop_req - 4. Setting
2760 * GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. */
2762 if (max_stop_req - critical_point < 4)
2765 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2766 /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
2767 critical_point = 0x10;
2770 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2771 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2772 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2773 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2774 if ((rdev->family == CHIP_R350) &&
2775 (stop_req > 0x15)) {
2778 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2779 temp |= RADEON_GRPH_BUFFER_SIZE;
2780 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
2781 RADEON_GRPH_CRITICAL_AT_SOF |
2782 RADEON_GRPH_STOP_CNTL);
2784 /* Write the result into the register. */
2786 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2787 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2790 if ((rdev->family == CHIP_RS400) ||
2791 (rdev->family == CHIP_RS480)) {
2792 /* attempt to program RS400 disp regs correctly ??? */
2793 temp = RREG32(RS400_DISP1_REG_CNTL);
2794 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
2795 RS400_DISP1_STOP_REQ_LEVEL_MASK);
2796 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
2797 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2798 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2799 temp = RREG32(RS400_DMIF_MEM_CNTL1);
2800 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
2801 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
2802 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
2803 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
2804 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
2808 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
2809 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
2810 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
2815 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
2817 if (stop_req > max_stop_req)
2818 stop_req = max_stop_req;
2821 /* Find the drain rate of the display buffer. */
2823 temp_ff.full = rfixed_const((16/pixel_bytes2));
2824 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
2826 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2827 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2828 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2829 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
2830 if ((rdev->family == CHIP_R350) &&
2831 (stop_req > 0x15)) {
2834 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2835 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
2836 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
2837 RADEON_GRPH_CRITICAL_AT_SOF |
2838 RADEON_GRPH_STOP_CNTL);
2840 if ((rdev->family == CHIP_RS100) ||
2841 (rdev->family == CHIP_RS200))
2842 critical_point2 = 0;
2844 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
2845 temp_ff.full = rfixed_const(temp);
2846 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
2847 if (sclk_ff.full < temp_ff.full)
2848 temp_ff.full = sclk_ff.full;
2850 read_return_rate.full = temp_ff.full;
2853 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2854 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
2856 time_disp1_drop_priority.full = 0;
2858 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2859 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
2860 crit_point_ff.full += rfixed_const_half(0);
2862 critical_point2 = rfixed_trunc(crit_point_ff);
2864 if (rdev->disp_priority == 2) {
2865 critical_point2 = 0;
2868 if (max_stop_req - critical_point2 < 4)
2869 critical_point2 = 0;
2873 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
2874 /* some R300 cards have a problem with this set to 0 */
2875 critical_point2 = 0x10;
2878 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2879 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2881 if ((rdev->family == CHIP_RS400) ||
2882 (rdev->family == CHIP_RS480)) {
2884 /* attempt to program RS400 disp2 regs correctly ??? */
2885 temp = RREG32(RS400_DISP2_REQ_CNTL1);
2886 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
2887 RS400_DISP2_STOP_REQ_LEVEL_MASK);
2888 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
2889 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2890 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2891 temp = RREG32(RS400_DISP2_REQ_CNTL2);
2892 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
2893 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
2894 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
2895 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
2896 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
2898 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
2899 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
2900 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
2901 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
2904 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
2905 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
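/* Worked example of the per-CRTC math above, with illustrative numbers: a
 * 1024x768 mode at 32bpp (pixel_bytes = 4) and a 65 MHz pixel clock gives
 * stop_req = 1024 * 4 / 16 = 256, which is then clamped to max_stop_req,
 * and disp_drain_rate = 65 / (16 / 4) = 16.25 in the fixed-point units
 * used throughout r100_bandwidth_update().
 */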
2909 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2911 DRM_ERROR("pitch %d\n", t->pitch);
2912 DRM_ERROR("use_pitch %d\n", t->use_pitch);
2913 DRM_ERROR("width %d\n", t->width);
2914 DRM_ERROR("width_11 %d\n", t->width_11);
2915 DRM_ERROR("height %d\n", t->height);
2916 DRM_ERROR("height_11 %d\n", t->height_11);
2917 DRM_ERROR("num levels %d\n", t->num_levels);
2918 DRM_ERROR("depth %d\n", t->txdepth);
2919 DRM_ERROR("bpp %d\n", t->cpp);
2920 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2921 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2922 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2923 DRM_ERROR("compress format %d\n", t->compress_format);
2926 static int r100_cs_track_cube(struct radeon_device *rdev,
2927 struct r100_cs_track *track, unsigned idx)
2929 unsigned face, w, h;
2930 struct radeon_bo *cube_robj;
2933 for (face = 0; face < 5; face++) {
2934 cube_robj = track->textures[idx].cube_info[face].robj;
2935 w = track->textures[idx].cube_info[face].width;
2936 h = track->textures[idx].cube_info[face].height;
2939 size *= track->textures[idx].cpp;
2941 size += track->textures[idx].cube_info[face].offset;
2943 if (size > radeon_bo_size(cube_robj)) {
2944 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2945 size, radeon_bo_size(cube_robj));
2946 r100_cs_track_texture_print(&track->textures[idx]);
2953 static int r100_track_compress_size(int compress_format, int w, int h)
2955 int block_width, block_height, block_bytes;
2956 int wblocks, hblocks;
2963 switch (compress_format) {
2964 case R100_TRACK_COMP_DXT1:
2969 case R100_TRACK_COMP_DXT35:
2975 hblocks = (h + block_height - 1) / block_height;
2976 wblocks = (w + block_width - 1) / block_width;
2977 if (wblocks < min_wblocks)
2978 wblocks = min_wblocks;
2979 sz = wblocks * hblocks * block_bytes;
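/* Worked example for the helper above: with 4x4 texel blocks, a 64x64 DXT1
 * level is (64/4) * (64/4) = 256 blocks, i.e. 256 * block_bytes bytes
 * (2048 bytes at the 8-byte DXT1 block size); tiny levels still pay for at
 * least min_wblocks block columns because of the clamp above.
 */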
2983 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2984 struct r100_cs_track *track)
2986 struct radeon_bo *robj;
2988 unsigned u, i, w, h, d;
2991 for (u = 0; u < track->num_texture; u++) {
2992 if (!track->textures[u].enabled)
2994 robj = track->textures[u].robj;
2996 DRM_ERROR("No texture bound to unit %u\n", u);
3000 for (i = 0; i <= track->textures[u].num_levels; i++) {
3001 if (track->textures[u].use_pitch) {
3002 if (rdev->family < CHIP_R300)
3003 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
3005 w = track->textures[u].pitch / (1 << i);
3007 w = track->textures[u].width;
3008 if (rdev->family >= CHIP_RV515)
3009 w |= track->textures[u].width_11;
3011 if (track->textures[u].roundup_w)
3012 w = roundup_pow_of_two(w);
3014 h = track->textures[u].height;
3015 if (rdev->family >= CHIP_RV515)
3016 h |= track->textures[u].height_11;
3018 if (track->textures[u].roundup_h)
3019 h = roundup_pow_of_two(h);
3020 if (track->textures[u].tex_coord_type == 1) {
3021 d = (1 << track->textures[u].txdepth) / (1 << i);
3027 if (track->textures[u].compress_format) {
3029 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
3030 /* compressed textures are block based */
3034 size *= track->textures[u].cpp;
3036 switch (track->textures[u].tex_coord_type) {
3041 if (track->separate_cube) {
3042 ret = r100_cs_track_cube(rdev, track, u);
3049 DRM_ERROR("Invalid texture coordinate type %u for unit "
3050 "%u\n", track->textures[u].tex_coord_type, u);
3053 if (size > radeon_bo_size(robj)) {
3054 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
3055 "%lu\n", u, size, radeon_bo_size(robj));
3056 r100_cs_track_texture_print(&track->textures[u]);
3063 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3070 for (i = 0; i < track->num_cb; i++) {
3071 if (track->cb[i].robj == NULL) {
3072 if (!(track->fastfill || track->color_channel_mask ||
3073 track->blend_read_enable)) {
3076 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3079 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
3080 size += track->cb[i].offset;
3081 if (size > radeon_bo_size(track->cb[i].robj)) {
3082 DRM_ERROR("[drm] Buffer too small for color buffer %d "
3083 "(need %lu have %lu) !\n", i, size,
3084 radeon_bo_size(track->cb[i].robj));
3085 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
3086 i, track->cb[i].pitch, track->cb[i].cpp,
3087 track->cb[i].offset, track->maxy);
3091 if (track->z_enabled) {
3092 if (track->zb.robj == NULL) {
3093 DRM_ERROR("[drm] No buffer for z buffer !\n");
3096 size = track->zb.pitch * track->zb.cpp * track->maxy;
3097 size += track->zb.offset;
3098 if (size > radeon_bo_size(track->zb.robj)) {
3099 DRM_ERROR("[drm] Buffer too small for z buffer "
3100 "(need %lu have %lu) !\n", size,
3101 radeon_bo_size(track->zb.robj));
3102 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
3103 track->zb.pitch, track->zb.cpp,
3104 track->zb.offset, track->maxy);
3108 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3109 if (track->vap_vf_cntl & (1 << 14)) {
3110 nverts = track->vap_alt_nverts;
3112 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3114 switch (prim_walk) {
3116 for (i = 0; i < track->num_arrays; i++) {
3117 size = track->arrays[i].esize * track->max_indx * 4;
3118 if (track->arrays[i].robj == NULL) {
3119 DRM_ERROR("(PW %u) Vertex array %u no buffer "
3120 "bound\n", prim_walk, i);
3123 if (size > radeon_bo_size(track->arrays[i].robj)) {
3124 dev_err(rdev->dev, "(PW %u) Vertex array %u "
3125 "need %lu dwords have %lu dwords\n",
3126 prim_walk, i, size >> 2,
3127 radeon_bo_size(track->arrays[i].robj)
3129 DRM_ERROR("Max indices %u\n", track->max_indx);
3135 for (i = 0; i < track->num_arrays; i++) {
3136 size = track->arrays[i].esize * (nverts - 1) * 4;
3137 if (track->arrays[i].robj == NULL) {
3138 DRM_ERROR("(PW %u) Vertex array %u no buffer "
3139 "bound\n", prim_walk, i);
3142 if (size > radeon_bo_size(track->arrays[i].robj)) {
3143 dev_err(rdev->dev, "(PW %u) Vertex array %u "
3144 "need %lu dwords have %lu dwords\n",
3145 prim_walk, i, size >> 2,
3146 radeon_bo_size(track->arrays[i].robj)
3153 size = track->vtx_size * nverts;
3154 if (size != track->immd_dwords) {
3155 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
3156 track->immd_dwords, size);
3157 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
3158 nverts, track->vtx_size);
3163 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
3167 return r100_cs_track_texture_check(rdev, track);
3170 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3174 if (rdev->family < CHIP_R300) {
3176 if (rdev->family <= CHIP_RS200)
3177 track->num_texture = 3;
3179 track->num_texture = 6;
3181 track->separate_cube = 1;
3184 track->num_texture = 16;
3186 track->separate_cube = 0;
3189 for (i = 0; i < track->num_cb; i++) {
3190 track->cb[i].robj = NULL;
3191 track->cb[i].pitch = 8192;
3192 track->cb[i].cpp = 16;
3193 track->cb[i].offset = 0;
3195 track->z_enabled = true;
3196 track->zb.robj = NULL;
3197 track->zb.pitch = 8192;
3199 track->zb.offset = 0;
3200 track->vtx_size = 0x7F;
3201 track->immd_dwords = 0xFFFFFFFFUL;
3202 track->num_arrays = 11;
3203 track->max_indx = 0x00FFFFFFUL;
3204 for (i = 0; i < track->num_arrays; i++) {
3205 track->arrays[i].robj = NULL;
3206 track->arrays[i].esize = 0x7F;
3208 for (i = 0; i < track->num_texture; i++) {
3209 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
3210 track->textures[i].pitch = 16536;
3211 track->textures[i].width = 16536;
3212 track->textures[i].height = 16536;
3213 track->textures[i].width_11 = 1 << 11;
3214 track->textures[i].height_11 = 1 << 11;
3215 track->textures[i].num_levels = 12;
3216 if (rdev->family <= CHIP_RS200) {
3217 track->textures[i].tex_coord_type = 0;
3218 track->textures[i].txdepth = 0;
3220 track->textures[i].txdepth = 16;
3221 track->textures[i].tex_coord_type = 1;
3223 track->textures[i].cpp = 64;
3224 track->textures[i].robj = NULL;
3225 /* CS IB emission code makes sure texture units are disabled */
3226 track->textures[i].enabled = false;
3227 track->textures[i].roundup_w = true;
3228 track->textures[i].roundup_h = true;
3229 if (track->separate_cube)
3230 for (face = 0; face < 5; face++) {
3231 track->textures[i].cube_info[face].robj = NULL;
3232 track->textures[i].cube_info[face].width = 16536;
3233 track->textures[i].cube_info[face].height = 16536;
3234 track->textures[i].cube_info[face].offset = 0;
3239 int r100_ring_test(struct radeon_device *rdev)
3246 r = radeon_scratch_get(rdev, &scratch);
3248 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3251 WREG32(scratch, 0xCAFEDEAD);
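/* Seed the scratch register with a dummy value; the test below passes only
 * once the CP has fetched the packet written next and replaced the seed
 * with 0xDEADBEEF. */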
3252 r = radeon_ring_lock(rdev, 2);
3254 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3255 radeon_scratch_free(rdev, scratch);
3258 radeon_ring_write(rdev, PACKET0(scratch, 0));
3259 radeon_ring_write(rdev, 0xDEADBEEF);
3260 radeon_ring_unlock_commit(rdev);
3261 for (i = 0; i < rdev->usec_timeout; i++) {
3262 tmp = RREG32(scratch);
3263 if (tmp == 0xDEADBEEF) {
3268 if (i < rdev->usec_timeout) {
3269 DRM_INFO("ring test succeeded in %d usecs\n", i);
3271 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
3275 radeon_scratch_free(rdev, scratch);
3279 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3281 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
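/* The type-0 packet header above makes the CP write the next two dwords
 * into consecutive registers starting at CP_IB_BASE: the IB's GPU address,
 * then its length in dwords. */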
3282 radeon_ring_write(rdev, ib->gpu_addr);
3283 radeon_ring_write(rdev, ib->length_dw);
3286 int r100_ib_test(struct radeon_device *rdev)
3288 struct radeon_ib *ib;
3294 r = radeon_scratch_get(rdev, &scratch);
3296 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3299 WREG32(scratch, 0xCAFEDEAD);
3300 r = radeon_ib_get(rdev, &ib);
3304 ib->ptr[0] = PACKET0(scratch, 0);
3305 ib->ptr[1] = 0xDEADBEEF;
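/* pad the remainder of the IB with type-2 filler (NOP) packets */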
3306 ib->ptr[2] = PACKET2(0);
3307 ib->ptr[3] = PACKET2(0);
3308 ib->ptr[4] = PACKET2(0);
3309 ib->ptr[5] = PACKET2(0);
3310 ib->ptr[6] = PACKET2(0);
3311 ib->ptr[7] = PACKET2(0);
3313 r = radeon_ib_schedule(rdev, ib);
3315 radeon_scratch_free(rdev, scratch);
3316 radeon_ib_free(rdev, &ib);
3319 r = radeon_fence_wait(ib->fence, false);
3323 for (i = 0; i < rdev->usec_timeout; i++) {
3324 tmp = RREG32(scratch);
3325 if (tmp == 0xDEADBEEF) {
3330 if (i < rdev->usec_timeout) {
3331 DRM_INFO("ib test succeeded in %u usecs\n", i);
3333 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
3337 radeon_scratch_free(rdev, scratch);
3338 radeon_ib_free(rdev, &ib);
3342 void r100_ib_fini(struct radeon_device *rdev)
3344 radeon_ib_pool_fini(rdev);
3347 int r100_ib_init(struct radeon_device *rdev)
3351 r = radeon_ib_pool_init(rdev);
3353 dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
3357 r = r100_ib_test(rdev);
3359 dev_err(rdev->dev, "failled testing IB (%d).\n", r);
3366 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3368 /* Shut down the CP; we shouldn't need to do that, but better safe than sorry. */
3371 rdev->cp.ready = false;
3372 WREG32(R_000740_CP_CSQ_CNTL, 0);
3374 /* Save a few CRTC registers */
3375 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3376 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3377 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3378 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3379 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3380 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3381 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3384 /* Disable VGA aperture access */
3385 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3386 /* Disable cursor, overlay, crtc */
3387 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3388 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3389 S_000054_CRTC_DISPLAY_DIS(1));
3390 WREG32(R_000050_CRTC_GEN_CNTL,
3391 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3392 S_000050_CRTC_DISP_REQ_EN_B(1));
3393 WREG32(R_000420_OV0_SCALE_CNTL,
3394 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3395 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3396 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3397 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3398 S_000360_CUR2_LOCK(1));
3399 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3400 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3401 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3402 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3403 WREG32(R_000360_CUR2_OFFSET,
3404 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3408 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3410 /* Update base address for crtc */
3411 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3412 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3413 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3415 /* Restore CRTC registers */
3416 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3417 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3418 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3419 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3420 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3424 void r100_vga_render_disable(struct radeon_device *rdev)
3428 tmp = RREG8(R_0003C2_GENMO_WT);
3429 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3432 static void r100_debugfs(struct radeon_device *rdev)
3436 r = r100_debugfs_mc_info_init(rdev);
3438 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3441 static void r100_mc_program(struct radeon_device *rdev)
3443 struct r100_mc_save save;
3445 /* Stop all MC clients */
3446 r100_mc_stop(rdev, &save);
3447 if (rdev->flags & RADEON_IS_AGP) {
3448 WREG32(R_00014C_MC_AGP_LOCATION,
3449 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3450 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3451 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3452 if (rdev->family > CHIP_RV200)
3453 WREG32(R_00015C_AGP_BASE_2,
3454 upper_32_bits(rdev->mc.agp_base) & 0xff);
3456 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3457 WREG32(R_000170_AGP_BASE, 0);
3458 if (rdev->family > CHIP_RV200)
3459 WREG32(R_00015C_AGP_BASE_2, 0);
3461 /* Wait for mc idle */
3462 if (r100_mc_wait_for_idle(rdev))
3463 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3464 /* Program the MC; it should be a 32-bit limited address space */
3465 WREG32(R_000148_MC_FB_LOCATION,
3466 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3467 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3468 r100_mc_resume(rdev, &save);
3471 void r100_clock_startup(struct radeon_device *rdev)
3475 if (radeon_dynclks != -1 && radeon_dynclks)
3476 radeon_legacy_set_clock_gating(rdev, 1);
3477 /* We need to force some of the blocks on */
3478 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3479 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3480 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3481 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3482 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3485 static int r100_startup(struct radeon_device *rdev)
3489 /* set common regs */
3490 r100_set_common_regs(rdev);
3492 r100_mc_program(rdev);
3494 r100_clock_startup(rdev);
3495 /* Initialize GPU configuration (# pipes, ...) */
3496 /* r100_gpu_init(rdev); */
3497 /* Initialize GART (initialize after TTM so we can allocate
3498 * memory through TTM but finalize after TTM) */
3499 r100_enable_bm(rdev);
3500 if (rdev->flags & RADEON_IS_PCI) {
3501 r = r100_pci_gart_enable(rdev);
3507 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3508 /* 1M ring buffer */
3509 r = r100_cp_init(rdev, 1024 * 1024);
3511 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
3514 r = r100_wb_init(rdev);
3516 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
3517 r = r100_ib_init(rdev);
3519 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
3525 int r100_resume(struct radeon_device *rdev)
3527 /* Make sure the GART is not working */
3528 if (rdev->flags & RADEON_IS_PCI)
3529 r100_pci_gart_disable(rdev);
3530 /* Resume clock before doing reset */
3531 r100_clock_startup(rdev);
3532 /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
3533 if (radeon_asic_reset(rdev)) {
3534 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3535 RREG32(R_000E40_RBBM_STATUS),
3536 RREG32(R_0007C0_CP_STAT));
3539 radeon_combios_asic_init(rdev->ddev);
3540 /* Resume clock after posting */
3541 r100_clock_startup(rdev);
3542 /* Initialize surface registers */
3543 radeon_surface_init(rdev);
3544 return r100_startup(rdev);
3547 int r100_suspend(struct radeon_device *rdev)
3549 r100_cp_disable(rdev);
3550 r100_wb_disable(rdev);
3551 r100_irq_disable(rdev);
3552 if (rdev->flags & RADEON_IS_PCI)
3553 r100_pci_gart_disable(rdev);
3557 void r100_fini(struct radeon_device *rdev)
3559 radeon_pm_fini(rdev);
3563 radeon_gem_fini(rdev);
3564 if (rdev->flags & RADEON_IS_PCI)
3565 r100_pci_gart_fini(rdev);
3566 radeon_agp_fini(rdev);
3567 radeon_irq_kms_fini(rdev);
3568 radeon_fence_driver_fini(rdev);
3569 radeon_bo_fini(rdev);
3570 radeon_atombios_fini(rdev);
3575 int r100_init(struct radeon_device *rdev)
3579 /* Register debugfs files specific to this group of asics */
3582 r100_vga_render_disable(rdev);
3583 /* Initialize scratch registers */
3584 radeon_scratch_init(rdev);
3585 /* Initialize surface registers */
3586 radeon_surface_init(rdev);
3587 /* TODO: disable VGA; need to use the VGA request */
3589 if (!radeon_get_bios(rdev)) {
3590 if (ASIC_IS_AVIVO(rdev))
3593 if (rdev->is_atom_bios) {
3594 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
3597 r = radeon_combios_init(rdev);
3601 /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
3602 if (radeon_asic_reset(rdev)) {
3604 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3605 RREG32(R_000E40_RBBM_STATUS),
3606 RREG32(R_0007C0_CP_STAT));
3608 /* check if the card is posted or not */
3609 if (radeon_boot_test_post_card(rdev) == false)
3611 /* Set asic errata */
3613 /* Initialize clocks */
3614 radeon_get_clock_info(rdev->ddev);
3615 /* Initialize power management */
3616 radeon_pm_init(rdev);
3617 /* initialize AGP */
3618 if (rdev->flags & RADEON_IS_AGP) {
3619 r = radeon_agp_init(rdev);
3621 radeon_agp_disable(rdev);
3624 /* initialize VRAM */
3627 r = radeon_fence_driver_init(rdev);
3630 r = radeon_irq_kms_init(rdev);
3633 /* Memory manager */
3634 r = radeon_bo_init(rdev);
3637 if (rdev->flags & RADEON_IS_PCI) {
3638 r = r100_pci_gart_init(rdev);
3642 r100_set_safe_registers(rdev);
3643 rdev->accel_working = true;
3644 r = r100_startup(rdev);
3646 /* Something went wrong with the accel init, so stop accel */
3647 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3651 radeon_irq_kms_fini(rdev);
3652 if (rdev->flags & RADEON_IS_PCI)
3653 r100_pci_gart_fini(rdev);
3654 rdev->accel_working = false;