__raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);
}
- static bool iommu_reset(void __iomem *base, const char *dbgname)
+ static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname)
{
- bool err = true;
+ bool ret = true;
- err = iommu_enable_stall(base);
- if (!err) {
- pr_info("%s:stall failed: %s\n", __func__, dbgname);
- return err;
+ ret = rockchip_iommu_raw_reset(base);
+ if (!ret) {
+ pr_info("(%s), %s failed\n", dbgname, __func__);
+ return ret;
}
- err = iommu_raw_reset(base);
- if (err) {
- if (base != rk312x_vop_mmu_base)
- __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
+
+ if (base != rk312x_vop_mmu_base)
+ __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
IOMMU_INTERRUPT_READ_BUS_ERROR,
- base+IOMMU_REGISTER_INT_MASK);
- else
- __raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);
- }
- iommu_disable_stall(base);
- if (!err)
- pr_info("%s: failed: %s\n", __func__, dbgname);
- return err;
+ base + IOMMU_REGISTER_INT_MASK);
+ else
+ __raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);
+
+ return ret;
}
- static inline void pgtable_flush(void *vastart, void *vaend)
+ static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
{
+#ifdef CONFIG_ARM
dmac_flush_range(vastart, vaend);
outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
+#elif defined(CONFIG_ARM64)
+ __dma_flush_range(vastart, vaend);
+#endif
}
- static void set_fault_handler(struct iommu_drvdata *data,
- rockchip_iommu_fault_handler_t handler)
+ static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
{
- unsigned long flags;
-
- write_lock_irqsave(&data->lock, flags);
- data->fault_handler = handler;
- write_unlock_irqrestore(&data->lock, flags);
+ u32 dte_index, pte_index, page_offset;
+ u32 mmu_dte_addr;
+ phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
+ u32 *dte_addr;
+ u32 dte;
+ phys_addr_t pte_addr_phys = 0;
+ u32 *pte_addr = NULL;
+ u32 pte = 0;
+ phys_addr_t page_addr_phys = 0;
+ u32 page_flags = 0;
+
+ dte_index = rockchip_lv1ent_offset(fault_address);
+ pte_index = rockchip_lv2ent_offset(fault_address);
+ page_offset = (u32)(fault_address & 0x00000fff);
+
+ mmu_dte_addr = addr_dte;
+ mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+
+ dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ dte_addr = phys_to_virt(dte_addr_phys);
+ dte = *dte_addr;
+
+ if (!(IOMMU_FLAGS_PRESENT & dte))
+ goto print_it;
+
+ pte_addr_phys = ((phys_addr_t)dte & 0xfffff000) + (pte_index * 4);
+ pte_addr = phys_to_virt(pte_addr_phys);
+ pte = *pte_addr;
+
+ if (!(IOMMU_FLAGS_PRESENT & pte))
+ goto print_it;
+
+ page_addr_phys = ((phys_addr_t)pte & 0xfffff000) + page_offset;
+ page_flags = pte & 0x000001fe;
+
+ print_it:
+ pr_err("iova = %pad: dte_index: 0x%03x pte_index: 0x%03x page_offset: 0x%03x\n",
+ &fault_address, dte_index, pte_index, page_offset);
+ pr_err("mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+ &mmu_dte_addr_phys, &dte_addr_phys, dte,
+ (dte & IOMMU_FLAGS_PRESENT), &pte_addr_phys, pte,
+ (pte & IOMMU_FLAGS_PRESENT), &page_addr_phys, page_flags);
}
- static int default_fault_handler(struct device *dev,
- enum rk_iommu_inttype itype,
- unsigned long pgtable_base,
- unsigned long fault_addr,
- unsigned int status)
+ static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
{
- struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ /* The SYSMMU is blocked while this interrupt is being handled. */
+ struct iommu_drvdata *data = dev_id;
+ u32 status;
+ u32 rawstat;
+ dma_addr_t fault_address;
+ int i;
+ unsigned long flags;
+ int ret;
+ u32 reg_status;
- if (!data) {
- dev_err(dev->archdata.iommu,"%s,iommu device not assigned yet\n", __func__);
- return 0;
+ spin_lock_irqsave(&data->data_lock, flags);
+
+ if (!rockchip_is_iommu_active(data)) {
+ spin_unlock_irqrestore(&data->data_lock, flags);
+ return IRQ_HANDLED;
}
- if ((itype >= IOMMU_FAULTS_NUM) || (itype < IOMMU_PAGEFAULT))
- itype = IOMMU_FAULT_UNKNOWN;
- if (itype == IOMMU_BUSERROR)
- dev_err(dev->archdata.iommu,"%s occured at 0x%lx(Page table base: 0x%lx)\n",
- iommu_fault_name[itype], fault_addr, pgtable_base);
+ for (i = 0; i < data->num_res_mem; i++) {
+ status = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_INT_STATUS);
+ if (status == 0)
+ continue;
- if (itype == IOMMU_PAGEFAULT)
- dev_err(dev->archdata.iommu,"IOMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
- fault_addr,
- (status >> 6) & 0x1F,
- (status & 32) ? "write" : "read",
- data->dbgname);
+ rawstat = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_INT_RAWSTAT);
- dev_err(dev->archdata.iommu,"Generating Kernel OOPS... because it is unrecoverable.\n");
+ reg_status = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_STATUS);
- BUG();
+ dev_info(data->iommu, "1.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
+ rawstat, status, reg_status);
- return 0;
- }
+ if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
+ u32 dte;
+ int flags;
- static void dump_pagetbl(u32 fault_address, u32 addr_dte)
- {
- u32 lv1_offset;
- u32 lv2_offset;
-
- u32 *lv1_entry_pa;
- u32 *lv1_entry_va;
- u32 *lv1_entry_value;
-
- u32 *lv2_base;
- u32 *lv2_entry_pa;
- u32 *lv2_entry_va;
- u32 *lv2_entry_value;
-
-
- lv1_offset = lv1ent_offset(fault_address);
- lv2_offset = lv2ent_offset(fault_address);
-
- lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
- lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
- lv1_entry_value = (u32 *)(*lv1_entry_va);
-
- lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe);
- lv2_entry_pa = (u32 *)lv2_base + lv2_offset;
- lv2_entry_va = (u32 *)(__va(lv2_base)) + lv2_offset;
- lv2_entry_value = (u32 *)(*lv2_entry_va);
-
- dev_info(NULL,"fault address = 0x%08x,dte addr pa = 0x%08x,va = 0x%08x\n",
- fault_address, addr_dte, (u32)__va(addr_dte));
- dev_info(NULL,"lv1_offset = 0x%x,lv1_entry_pa = 0x%08x,lv1_entry_va = 0x%08x\n",
- lv1_offset, (u32)lv1_entry_pa, (u32)lv1_entry_va);
- dev_info(NULL,"lv1_entry_value(*lv1_entry_va) = 0x%08x,lv2_base = 0x%08x\n",
- (u32)lv1_entry_value, (u32)lv2_base);
- dev_info(NULL,"lv2_offset = 0x%x,lv2_entry_pa = 0x%08x,lv2_entry_va = 0x%08x\n",
- lv2_offset, (u32)lv2_entry_pa, (u32)lv2_entry_va);
- dev_info(NULL,"lv2_entry value(*lv2_entry_va) = 0x%08x\n",
- (u32)lv2_entry_value);
- }
+ fault_address = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_PAGE_FAULT_ADDR);
- static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
- {
- /* SYSMMU is in blocked when interrupt occurred. */
- struct iommu_drvdata *data = dev_id;
- struct resource *irqres;
- struct platform_device *pdev;
- enum rk_iommu_inttype itype = IOMMU_FAULT_UNKNOWN;
- u32 status;
- u32 rawstat;
- u32 int_status;
- u32 fault_address;
- int i, ret = 0;
+ dte = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_DTE_ADDR);
- read_lock(&data->lock);
+ flags = (status & 32) ? 1 : 0;
- if (!is_iommu_active(data)) {
- read_unlock(&data->lock);
- return IRQ_HANDLED;
- }
-
- if(cpu_is_rk312x() || cpu_is_rk3036())
- rockchip_vcodec_select(data->dbgname);
-
- pdev = to_platform_device(data->iommu);
+ dev_err(data->iommu, "Page fault detected at %pad from bus id %d of type %s on %s\n",
+ &fault_address, (status >> 6) & 0x1F,
+ (flags == 1) ? "write" : "read", data->dbgname);
- for (i = 0; i < data->num_res_irq; i++) {
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (irqres && ((int)irqres->start == irq)) {
- if (data->res_bases[i] == rk312x_vop_mmu_base) {
- read_unlock(&data->lock);
- return IRQ_HANDLED;
- }
- break;
+ dump_pagetbl(fault_address, dte);
+
+ if (data->domain)
+ report_iommu_fault(data->domain, data->iommu,
+ fault_address, flags);
+
+ rockchip_iommu_page_fault_done(data->res_bases[i],
+ data->dbgname);
}
- }
- if (i == data->num_res_irq) {
- itype = IOMMU_FAULT_UNKNOWN;
- } else {
- int_status = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_INT_STATUS);
-
- if (int_status != 0) {
- /*mask status*/
- __raw_writel(0x00, data->res_bases[i] +
- IOMMU_REGISTER_INT_MASK);
-
- rawstat = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_INT_RAWSTAT);
-
- if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
- fault_address = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_PAGE_FAULT_ADDR);
- itype = IOMMU_PAGEFAULT;
- } else if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
- itype = IOMMU_BUSERROR;
- } else {
- goto out;
- }
- dump_pagetbl(fault_address,
- __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_DTE_ADDR));
- } else {
- goto out;
+ if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
+ dev_err(data->iommu, "bus error occurred at %pad\n",
+ &fault_address);
}
- }
- if (data->fault_handler) {
- unsigned long base = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_DTE_ADDR);
+ if (rawstat & ~(IOMMU_INTERRUPT_READ_BUS_ERROR |
+ IOMMU_INTERRUPT_PAGE_FAULT)) {
+ dev_err(data->iommu, "unexpected int_status: %#08x\n",
+ rawstat);
+ }
+
+ __raw_writel(rawstat, data->res_bases[i] +
+ IOMMU_REGISTER_INT_CLEAR);
+
status = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_STATUS);
- ret = data->fault_handler(data->dev, itype, base,
- fault_address, status);
- }
+ IOMMU_REGISTER_INT_STATUS);
- if (!ret && (itype != IOMMU_FAULT_UNKNOWN)) {
- if (IOMMU_PAGEFAULT == itype) {
- iommu_zap_tlb(data->res_bases[i]);
- iommu_page_fault_done(data->res_bases[i],
- data->dbgname);
- __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
- IOMMU_INTERRUPT_READ_BUS_ERROR,
- data->res_bases[i] +
- IOMMU_REGISTER_INT_MASK);
- }
- } else {
- dev_err(data->iommu,"(%s) %s is not handled.\n",
- data->dbgname, iommu_fault_name[itype]);
- }
+ rawstat = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_INT_RAWSTAT);
+
+ reg_status = __raw_readl(data->res_bases[i] +
+ IOMMU_REGISTER_STATUS);
- out:
- read_unlock(&data->lock);
+ dev_info(data->iommu, "2.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
+ rawstat, status, reg_status);
+ ret = rockchip_iommu_zap_tlb_without_stall(data->res_bases[i]);
+ if (ret)
+ dev_err(data->iommu, "(%s) %s failed\n", data->dbgname,
+ __func__);
+ }
+
+ spin_unlock_irqrestore(&data->data_lock, flags);
return IRQ_HANDLED;
}
__get_str(client), __entry->buf, __entry->size, __entry->kaddr)
);
DEFINE_EVENT(ion_kmap_op, ion_kernel_map,
- TP_PROTO(const char* client, unsigned int buffer, unsigned int size, unsigned int kaddr),
+ TP_PROTO(const char* client, void* buffer, unsigned int size, void* kaddr),
TP_ARGS(client, buffer, size, kaddr));
+ DECLARE_EVENT_CLASS(ion_mmap_op,
+ TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+ unsigned long vm_start, unsigned long vm_end),
+ TP_ARGS(client, buf, size, vm_start, vm_end),
+ TP_STRUCT__entry(
+ __string(client, client)
+ __field(unsigned int, buf)
+ __field(unsigned int, size)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ ),
+ TP_fast_assign(
+ __assign_str(client, client);
+ __entry->buf = buf;
+ __entry->size = size;
+ __entry->vm_start = vm_start;
+ __entry->vm_end = vm_end;
+ ),
+ TP_printk("client=%s,buffer=%08x:%d,vma[%08lx:%08lx]",
+ __get_str(client), __entry->buf, __entry->size,
+ __entry->vm_start, __entry->vm_end)
+ );
+
+ DEFINE_EVENT(ion_mmap_op, ion_buffer_mmap,
+ TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+ unsigned long vm_start, unsigned long vm_end),
+ TP_ARGS(client, buf, size, vm_start, vm_end));
+ DEFINE_EVENT(ion_mmap_op, ion_buffer_munmap,
+ TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+ unsigned long vm_start, unsigned long vm_end),
+ TP_ARGS(client, buf, size, vm_start, vm_end));
#endif /* _TRACE_ION_H */