/*
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
18 #include <linux/bitops.h>
20 #include <mali_kbase.h>
21 #include <mali_kbase_mem.h>
22 #include <mali_kbase_mmu_hw.h>
23 #include <mali_kbase_mmu_hw_direct.h>
25 #if KBASE_MMU_HW_BACKEND
27 static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
32 /* can't lock a zero sized range */
33 KBASE_DEBUG_ASSERT(num_pages);
35 region = pfn << PAGE_SHIFT;
37 * fls returns (given the ASSERT above):
41 * results in the range (11 .. 42)
44 /* gracefully handle num_pages being zero */
50 region_width = 10 + fls(num_pages);
51 if (num_pages != (1ul << (region_width - 11))) {
52 /* not pow2, so must go up to the next pow2 */
55 KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
56 KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
57 region |= region_width;
63 static int wait_ready(struct kbase_device *kbdev,
64 unsigned int as_nr, struct kbase_context *kctx)
66 unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
68 /* Wait for the MMU status to indicate there is no active command. */
69 while (--max_loops && kbase_reg_read(kbdev,
70 MMU_AS_REG(as_nr, AS_STATUS),
71 kctx) & AS_STATUS_AS_ACTIVE) {
76 dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
83 static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
84 struct kbase_context *kctx)
88 /* write AS_COMMAND when MMU is ready to accept another command */
89 status = wait_ready(kbdev, as_nr, kctx);
91 kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd, kctx);
96 void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
98 const int num_as = 16;
99 const int busfault_shift = MMU_REGS_PAGE_FAULT_FLAGS;
100 const int pf_shift = 0;
101 const unsigned long as_bit_mask = (1UL << num_as) - 1;
107 u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
108 /* page faults (note: Ignore ASes with both pf and bf) */
109 u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
111 KBASE_DEBUG_ASSERT(NULL != kbdev);
113 /* remember current mask */
114 spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
115 new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
116 /* mask interrupts for now */
117 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
118 spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
120 while (bf_bits | pf_bits) {
123 struct kbase_context *kctx;
126 * the while logic ensures we have a bit set, no need to check
129 as_no = ffs(bf_bits | pf_bits) - 1;
130 as = &kbdev->as[as_no];
133 * Refcount the kctx ASAP - it shouldn't disappear anyway, since
134 * Bus/Page faults _should_ only occur whilst jobs are running,
135 * and a job causing the Bus/Page fault shouldn't complete until
138 kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
140 /* find faulting address */
141 as->fault_addr = kbase_reg_read(kbdev,
142 MMU_AS_REG(as_no, AS_FAULTADDRESS_HI),
144 as->fault_addr <<= 32;
145 as->fault_addr |= kbase_reg_read(kbdev,
146 MMU_AS_REG(as_no, AS_FAULTADDRESS_LO),
149 /* record the fault status */
150 as->fault_status = kbase_reg_read(kbdev,
151 MMU_AS_REG(as_no, AS_FAULTSTATUS),
154 /* find the fault type */
155 as->fault_type = (bf_bits & (1 << as_no)) ?
156 KBASE_MMU_FAULT_TYPE_BUS : KBASE_MMU_FAULT_TYPE_PAGE;
159 if (kbase_as_has_bus_fault(as)) {
161 * Clear the internal JM mask first before clearing the
165 * Always clear the page fault just in case there was
166 * one at the same time as the bus error (bus errors are
167 * always processed in preference to pagefaults should
168 * both happen at the same time).
170 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
171 (1UL << MMU_REGS_BUS_ERROR_FLAG(as_no)) |
172 (1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no)), kctx);
174 /* mark as handled (note: bf_bits is already shifted) */
175 bf_bits &= ~(1UL << (as_no));
177 /* remove the queued BFs (and PFs) from the mask */
178 new_mask &= ~((1UL << MMU_REGS_BUS_ERROR_FLAG(as_no)) |
179 (1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no)));
182 * Clear the internal JM mask first before clearing the
185 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
186 1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no),
189 /* mark as handled */
190 pf_bits &= ~(1UL << as_no);
192 /* remove the queued PFs from the mask */
193 new_mask &= ~(1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no));
196 /* Process the interrupt for this address space */
197 kbase_mmu_interrupt_process(kbdev, kctx, as);
200 /* reenable interrupts */
201 spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
202 tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
204 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
205 spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
208 void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
209 struct kbase_context *kctx)
211 struct kbase_mmu_setup *current_setup = &as->current_setup;
213 kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
214 current_setup->transtab & 0xFFFFFFFFUL, kctx);
215 kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
216 (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);
218 kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
219 current_setup->memattr & 0xFFFFFFFFUL, kctx);
220 kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
221 (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
222 write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
225 int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
226 struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
227 unsigned int handling_irq)
231 if (op == AS_COMMAND_UNLOCK) {
232 /* Unlock doesn't require a lock first */
233 ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
235 u64 lock_addr = lock_region(kbdev, vpfn, nr);
237 /* Lock the region that needs to be updated */
238 kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
239 lock_addr & 0xFFFFFFFFUL, kctx);
240 kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
241 (lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
242 write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
244 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3285) &&
246 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
247 (1UL << as->number), NULL);
248 write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
251 /* Run the MMU operation */
252 write_cmd(kbdev, as->number, op, kctx);
254 /* Wait for the flush to complete */
255 ret = wait_ready(kbdev, as->number, kctx);
257 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
258 /* Issue an UNLOCK command to ensure that valid page
259 tables are re-read by the GPU after an update.
260 Note that, the FLUSH command should perform all the
261 actions necessary, however the bus logs show that if
262 multiple page faults occur within an 8 page region
263 the MMU does not always re-read the updated page
264 table entries for later faults or is only partially
265 read, it subsequently raises the page fault IRQ for
266 the same addresses, the unlock ensures that the MMU
267 cache is flushed, so updates can be re-read. As the
268 region is now unlocked we need to issue 2 UNLOCK
269 commands in order to flush the MMU/uTLB,
272 write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
273 write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
280 void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
281 struct kbase_context *kctx, enum kbase_mmu_fault_type type)
286 spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
287 mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx);
289 mask |= (1UL << MMU_REGS_PAGE_FAULT_FLAG(as->number));
290 if (type == KBASE_MMU_FAULT_TYPE_BUS)
291 mask |= (1UL << MMU_REGS_BUS_ERROR_FLAG(as->number));
293 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), mask, kctx);
294 spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);