/*
 *
 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
#include <linux/bitops.h>

#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mmu_hw.h>
#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
#endif
#include <backend/gpu/mali_kbase_mmu_hw_direct.h>
#include <backend/gpu/mali_kbase_device_internal.h>
static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
		u32 num_pages)
{
	u64 region;

	/* can't lock a zero sized range */
	KBASE_DEBUG_ASSERT(num_pages);

	region = pfn << PAGE_SHIFT;
	/*
	 * fls returns (given the ASSERT above):
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */
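	/*
	 * Worked example (editor's illustration, not in the original
	 * source): for num_pages = 96, fls(96) = 7, so region_width
	 * starts at 17; since 96 != (1ul << (17 - 11)) = 64, it is not a
	 * power of two and region_width is bumped to 18, covering the
	 * next power of two (128 pages). For num_pages = 64, fls(64) = 7
	 * gives 17, and 64 == (1ul << 6), so the width stays at 17.
	 */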
	/* gracefully handle num_pages being zero */
	if (0 == num_pages) {
		region |= 11;
	} else {
		u8 region_width;

		region_width = 10 + fls(num_pages);
		if (num_pages != (1ul << (region_width - 11))) {
			/* not pow2, so must go up to the next pow2 */
			region_width += 1;
		}
		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
		region |= region_width;
	}

	return region;
}
static int wait_ready(struct kbase_device *kbdev,
		unsigned int as_nr, struct kbase_context *kctx)
{
	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. Do not log remaining register accesses. */
	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);

	if (max_loops == 0) {
		dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
		return -1;
	}

	/* If waiting in loop was performed, log last read value. */
	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	return 0;
}
static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
		struct kbase_context *kctx)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(kbdev, as_nr, kctx);
	if (status == 0)
		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,
				kctx);

	return status;
}
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
	const int num_as = 16;
	const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
	const int pf_shift = 0;
	const unsigned long as_bit_mask = (1UL << num_as) - 1;
	unsigned long flags;
	u32 new_mask;
	u32 tmp;

	/* bus faults */
	u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
	/* page faults (note: Ignore ASes with both pf and bf) */
	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
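	/*
	 * Worked example (editor's note, not in the original source;
	 * assumes MMU_PAGE_FAULT_FLAGS == 16 so bus fault flags occupy
	 * bits 16..31): irq_stat = (1u << 16) | (1u << 3) | (1u << 0)
	 * reports a bus fault and a page fault on AS 0 plus a page fault
	 * on AS 3. Then bf_bits = 0x1 and pf_bits = 0x8, so AS 0 is
	 * handled as a bus fault only and AS 3 as a page fault.
	 */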
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* remember current mask */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	/* mask interrupts for now */
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
	while (bf_bits | pf_bits) {
		struct kbase_as *as;
		int as_no;
		struct kbase_context *kctx;

		/*
		 * the while logic ensures we have a bit set, no need to check
		 * for not-found here
		 */
		as_no = ffs(bf_bits | pf_bits) - 1;
		as = &kbdev->as[as_no];

		/*
		 * Refcount the kctx ASAP - it shouldn't disappear anyway, since
		 * Bus/Page faults _should_ only occur whilst jobs are running,
		 * and a job causing the Bus/Page fault shouldn't complete until
		 * the MMU is updated
		 */
		kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);

		/* find faulting address */
		as->fault_addr = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTADDRESS_HI), kctx);
		as->fault_addr <<= 32;
		as->fault_addr |= kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTADDRESS_LO), kctx);

		/* record the fault status */
		as->fault_status = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTSTATUS), kctx);

		/* find the fault type */
		as->fault_type = (bf_bits & (1 << as_no)) ?
				KBASE_MMU_FAULT_TYPE_BUS :
				KBASE_MMU_FAULT_TYPE_PAGE;
		if (kbase_as_has_bus_fault(as)) {
			/* Mark bus fault as handled.
			 * Note that a bus fault is processed first in case
			 * where both a bus fault and page fault occur.
			 */
			bf_bits &= ~(1UL << as_no);

			/* remove the queued BF (and PF) from the mask */
			new_mask &= ~(MMU_BUS_ERROR(as_no) |
					MMU_PAGE_FAULT(as_no));
		} else {
			/* Mark page fault as handled */
			pf_bits &= ~(1UL << as_no);

			/* remove the queued PF from the mask */
			new_mask &= ~MMU_PAGE_FAULT(as_no);
		}

		/* Process the interrupt for this address space */
		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		kbase_mmu_interrupt_process(kbdev, kctx, as);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock,
				flags);
	}
	/* reenable interrupts */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	new_mask |= tmp;
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx)
{
	struct kbase_mmu_setup *current_setup = &as->current_setup;
#ifdef CONFIG_MALI_MIPE_ENABLED
	u32 transcfg = 0;
#endif

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
			current_setup->transtab & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
			(current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
			current_setup->memattr & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
			(current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);

#if defined(CONFIG_MALI_MIPE_ENABLED)
	kbase_tlstream_tl_attrib_as_config(as,
			current_setup->transtab,
			current_setup->memattr,
			transcfg);
#endif

	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
}
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
		unsigned int handling_irq)
{
	int ret;

	if (op == AS_COMMAND_UNLOCK) {
		/* Unlock doesn't require a lock first */
		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
	} else {
		u64 lock_addr = lock_region(kbdev, vpfn, nr);

		/* Lock the region that needs to be updated */
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
				lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
				(lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
		write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);

		/* Run the MMU operation */
		write_cmd(kbdev, as->number, op, kctx);

		/* Wait for the flush to complete */
		ret = wait_ready(kbdev, as->number, kctx);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
			/* Issue an UNLOCK command to ensure that valid page
			 * tables are re-read by the GPU after an update.
			 * The FLUSH command alone should be sufficient, but
			 * bus logs show that when multiple page faults occur
			 * within an 8-page region the MMU does not always
			 * re-read the updated page table entries (or reads
			 * them only partially) for later faults, and then
			 * raises the page fault IRQ again for the same
			 * addresses. The UNLOCK flushes the MMU cache so the
			 * updates can be re-read. As the region is already
			 * unlocked at this point, two UNLOCK commands are
			 * required to flush the MMU/uTLB.
			 */
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
		}
	}

	return ret;
}
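/*
 * Usage sketch (editor's illustration, not part of the original file):
 * to flush the page tables for 'nr' pages starting at virtual PFN 'vpfn'
 * in an address space, a caller might lock, flush and unlock via:
 *
 *	kbase_mmu_hw_do_operation(kbdev, as, kctx, vpfn, nr,
 *			AS_COMMAND_FLUSH_PT, 0);
 *
 * AS_COMMAND_FLUSH_PT is one possible 'op'; the command a real caller
 * passes depends on its flushing requirements.
 */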
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
	unsigned long flags;
	u32 pf_bf_mask;

	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

	/*
	 * A reset is in-flight and we're flushing the IRQ + bottom half
	 * so don't update anything as it could race with the reset code.
	 */
	if (kbdev->irq_reset_flush)
		goto unlock;

	/* Clear the page (and bus fault IRQ as well in case one occurred) */
	pf_bf_mask = MMU_PAGE_FAULT(as->number);
	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
		pf_bf_mask |= MMU_BUS_ERROR(as->number);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);

unlock:
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
	unsigned long flags;
	u32 irq_mask;

	/* Enable the page fault IRQ (and bus fault IRQ as well in case one
	 * occurred) */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

	/*
	 * A reset is in-flight and we're flushing the IRQ + bottom half
	 * so don't update anything as it could race with the reset code.
	 */
	if (kbdev->irq_reset_flush)
		goto unlock;

	irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
			MMU_PAGE_FAULT(as->number);

	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
		irq_mask |= MMU_BUS_ERROR(as->number);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);

unlock:
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}