/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *pm4fw, *pfpfw;
	uint32_t gmem;
};
static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_256K,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_512K,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, ANY_ID),
		.revn  = 330,
		.name  = "A330",
		.pm4fw = "a330_pm4.fw",
		.pfpfw = "a330_pfp.fw",
		.gmem  = SZ_1M,
	},
};
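/*
 * ANY_ID entries in the table above act as wildcards: _rev_match() (below)
 * treats ANY_ID as matching any value, so e.g. the A320 entry matches every
 * 3.2.x.x revision.
 */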
MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
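/*
 * Note that MSM_PARAM_CHIP_ID above packs the revision one byte per field,
 * as 0xCCMMmmpp (core.major.minor.patchid): an A320 with revision 3.2.0.2,
 * for example, reads back as 0x03020002.
 */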
#define rbmemptr(adreno_gpu, member) \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
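/*
 * The memptrs buffer is a small chunk of shared memory that the CP writes
 * back to; rbmemptr() yields the GPU (iova) address of one of its members,
 * suitable for programming into CP registers or packets.  Roughly (see
 * adreno_gpu.h for the real definition):
 *
 *	struct adreno_rbmemptrs {
 *		volatile uint32_t rptr;
 *		volatile uint32_t wptr;
 *		volatile uint32_t fence;
 *	};
 */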
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	DBG("%s", gpu->name);

	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}
	/* Setup REG_CP_RB_CNTL: */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
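	/* (e.g. with the RB_SIZE default above, a 32KiB ring is
	 * 32768 / 8 = 4096 quad-words, so BUFSZ is programmed as
	 * ilog2(4096) = 12)
	 */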
	/* Setup ringbuffer address: */
	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
	gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));

	gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

	return 0;
}
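/*
 * The ring position helpers work in dwords: ring->start and ring->cur are
 * uint32_t pointers, so the difference computed in get_wptr() below is
 * already a dword index, which is the unit the CP rptr/wptr use.
 */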
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}
uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;
	adreno_gpu->memptrs->rptr  = 0;
	adreno_gpu->memptrs->wptr  = 0;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru: emit the restore buf like a normal cmd */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}
	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);		/* type-2 packet == one-dword CP nop */
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);
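	/* (the three-dword CACHE_FLUSH_TS event write asks the CP to write
	 * the given value (the fence) to the given address (memptrs->fence)
	 * once the flush completes; this is what adreno_last_fence() reads
	 * back)
	 */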
	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}

	gpu->funcs->flush(gpu);

	return 0;
}
void adreno_flush(struct msm_gpu *gpu)
{
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}
void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* wait for CP to drain ringbuffer: */
	if (spin_until(adreno_gpu->memptrs->rptr == wptr))
		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
}
#endif
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	printk("rptr:     %d\n", adreno_gpu->memptrs->rptr);
	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
	printk("rb wptr:  %d\n", get_wptr(gpu->rb));
}
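/*
 * Free ringbuffer space, in dwords.  One slot is deliberately kept
 * unusable so a full ring is distinguishable from an empty one: when
 * rptr == wptr the expression below yields size - 1, never size.
 */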
static uint32_t ring_freewords(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t size = gpu->rb->size / 4;
	uint32_t wptr = get_wptr(gpu->rb);
	uint32_t rptr = adreno_gpu->memptrs->rptr;
	return (rptr + (size - 1) - wptr) % size;
}
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	if (spin_until(ring_freewords(gpu) >= ndwords))
		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}
static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		struct adreno_rev rev)
{
	struct msm_mmu *mmu;
	int i, ret;

	/* identify gpu: */
	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
		const struct adreno_info *info = &gpulist[i];
		if (_rev_match(info->rev.core, rev.core) &&
				_rev_match(info->rev.major, rev.major) &&
				_rev_match(info->rev.minor, rev.minor) &&
				_rev_match(info->rev.patchid, rev.patchid)) {
			gpu->info = info;
			gpu->revn = info->revn;
			break;
		}
	}

	if (i == ARRAY_SIZE(gpulist)) {
		dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
				rev.core, rev.major, rev.minor, rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
			rev.core, rev.major, rev.minor, rev.patchid);
	gpu->funcs = funcs;
	gpu->rev = rev;
	gpu->gmem = gpu->info->gmem;

	ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				gpu->info->pfpfw, ret);
		return ret;
	}
	ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
			gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	mmu = gpu->base.mmu;
	if (mmu) {
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}
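	/* memptrs is the small shared buffer that rbmemptr() (above) points
	 * the CP at; it is allocated uncached so CPU reads observe CP writes
	 * without any cache maintenance.
	 */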
	mutex_lock(&drm->struct_mutex);
	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(gpu->memptrs_bo)) {
		ret = PTR_ERR(gpu->memptrs_bo);
		gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
	if (!gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
			&gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference(gpu->memptrs_bo);
	}
	if (gpu->pm4)
		release_firmware(gpu->pm4);
	if (gpu->pfp)
		release_firmware(gpu->pfp);
	msm_gpu_cleanup(&gpu->base);
}