* published by the Free Software Foundation.
*/
-#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/mm.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
#include <linux/errno.h>
-#include <linux/memblock.h>
-#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
-
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <linux/of.h>
-#include <linux/rockchip-iovmm.h>
-#include <linux/rockchip/grf.h>
-#include <linux/rockchip/cpu.h>
-#include <linux/rockchip/iomap.h>
-#include <linux/device.h>
-#include "rockchip-iommu.h"
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* MMU register offsets */
+#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
+#define RK_MMU_STATUS 0x04
+#define RK_MMU_COMMAND 0x08
+#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
+#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
+#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
+#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
+#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
+#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
+#define RK_MMU_AUTO_GATING 0x24
+
+#define DTE_ADDR_DUMMY 0xCAFEBABE
+#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+/* RK_MMU_STATUS fields */
+#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define RK_MMU_STATUS_IDLE BIT(3)
+#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
+
+/* RK_MMU_COMMAND command values */
+#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
+#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
+#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
+#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enable paging */
+#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
+#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
+#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
+
+/* RK_MMU_INT_* register fields */
+#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
+#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
+#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
+
+#define NUM_DT_ENTRIES 1024
+#define NUM_PT_ENTRIES 1024
-/* We does not consider super section mapping (16MB) */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
-#define SPAGE_MASK (~(SPAGE_SIZE - 1))
-
-static void __iomem *rk312x_vop_mmu_base;
-
-enum iommu_entry_flags {
- IOMMU_FLAGS_PRESENT = 0x01,
- IOMMU_FLAGS_READ_PERMISSION = 0x02,
- IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
- IOMMU_FLAGS_OVERRIDE_CACHE = 0x8,
- IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
- IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
- IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
- IOMMU_FLAGS_READ_CACHEABLE = 0x80,
- IOMMU_FLAGS_READ_ALLOCATE = 0x100,
- IOMMU_FLAGS_MASK = 0x1FF,
-};
-
-#define rockchip_lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
-#define rockchip_lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
-#define rockchip_lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
-#define rockchip_spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define rockchip_spage_offs(iova) ((iova) & 0x0FFF)
-
-#define rockchip_lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
-#define rockchip_lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
-
-#define NUM_LV1ENTRIES 1024
-#define NUM_LV2ENTRIES 1024
-#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
-
-#define rockchip_lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
-
-#define rockchip_mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
-/*write and read permission for level2 page default*/
-#define rockchip_mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
- IOMMU_FLAGS_READ_PERMISSION | \
- IOMMU_FLAGS_WRITE_PERMISSION)
+ /*
+ * Support mapping any size that fits in one page table:
+ * 4 KiB to 4 MiB
+ */
+#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
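+/*
+ * For illustration: 0x007ff000 has bits [22:12] set, so the allowed
+ * mapping sizes are exactly the powers of two from 4 KiB (1 << 12)
+ * up to 4 MiB (1 << 22).
+ */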
#define IOMMU_REG_POLL_COUNT_FAST 1000
-/**
- * MMU register numbers
- * Used in the register read/write routines.
- * See the hardware documentation for more information about each register
- */
-enum iommu_register {
- /**< Current Page Directory Pointer */
- IOMMU_REGISTER_DTE_ADDR = 0x0000,
- /**< Status of the MMU */
- IOMMU_REGISTER_STATUS = 0x0004,
- /**< Command register, used to control the MMU */
- IOMMU_REGISTER_COMMAND = 0x0008,
- /**< Logical address of the last page fault */
- IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
- /**< Used to invalidate the mapping of a single page from the MMU */
- IOMMU_REGISTER_ZAP_ONE_LINE = 0x010,
- /**< Raw interrupt status, all interrupts visible */
- IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
- /**< Indicate to the MMU that the interrupt has been received */
- IOMMU_REGISTER_INT_CLEAR = 0x0018,
- /**< Enable/disable types of interrupts */
- IOMMU_REGISTER_INT_MASK = 0x001C,
- /**< Interrupt status based on the mask */
- IOMMU_REGISTER_INT_STATUS = 0x0020,
- IOMMU_REGISTER_AUTO_GATING = 0x0024
-};
-
-enum iommu_command {
- /**< Enable paging (memory translation) */
- IOMMU_COMMAND_ENABLE_PAGING = 0x00,
- /**< Disable paging (memory translation) */
- IOMMU_COMMAND_DISABLE_PAGING = 0x01,
- /**< Enable stall on page fault */
- IOMMU_COMMAND_ENABLE_STALL = 0x02,
- /**< Disable stall on page fault */
- IOMMU_COMMAND_DISABLE_STALL = 0x03,
- /**< Zap the entire page table cache */
- IOMMU_COMMAND_ZAP_CACHE = 0x04,
- /**< Page fault processed */
- IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
- /**< Reset the MMU back to power-on settings */
- IOMMU_COMMAND_HARD_RESET = 0x06
+struct rk_iommu_domain {
+ struct list_head iommus;
+ struct platform_device *pdev;
+ u32 *dt; /* page directory table */
+ dma_addr_t dt_dma;
+ struct mutex iommus_lock; /* lock for iommus list */
+ struct mutex dt_lock; /* lock for modifying page directory table */
+
+ struct iommu_domain domain;
};
-/**
- * MMU interrupt register bits
- * Each cause of the interrupt is reported
- * through the (raw) interrupt status registers.
- * Multiple interrupts can be pending, so multiple bits
- * can be set at once.
- */
-enum iommu_interrupt {
- IOMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occured */
- IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occured */
+struct rk_iommu {
+ struct device *dev;
+ void __iomem **bases;
+ int num_mmu;
+ int *irq;
+ int num_irq;
+ bool reset_disabled; /* isp iommu reset operation would fail */
+ struct list_head node; /* entry in rk_iommu_domain.iommus */
+ struct iommu_domain *domain; /* domain to which iommu is attached */
+ struct clk *aclk; /* aclk clock belonging to master */
+ struct clk *hclk; /* hclk clock belonging to master */
};
-enum iommu_status_bits {
- IOMMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
- IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
- IOMMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
- IOMMU_STATUS_BIT_IDLE = 1 << 3,
- IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
- IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
- IOMMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
-};
+static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
+ unsigned int count)
+{
+ size_t size = count * sizeof(u32); /* count is the number of u32 entries */
-/**
- * Size of an MMU page in bytes
- */
-#define IOMMU_PAGE_SIZE 0x1000
+ dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
+}
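+/*
+ * Note: the CPU writes page table entries through its own kernel mapping,
+ * so newly written dtes/ptes must be synced out to memory (as done above)
+ * before the IOMMU's table walker can observe them.
+ */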
-/*
- * Size of the address space referenced by a page table page
- */
-#define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct rk_iommu_domain, domain);
+}
/**
- * Page directory index from address
- * Calculates the page directory index from the given address
+ * Inspired by _wait_for in intel_drv.h
+ * This is NOT safe for use in interrupt context.
+ *
+ * Note that it's important that we check the condition again after having
+ * timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
-#define IOMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+#define rk_wait_for(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = (COND) ? 0 : -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(50, 100); \
+ } \
+ ret__; \
+})
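+/*
+ * Illustrative use only: poll for up to 1 ms for paging to come up.
+ *
+ *	err = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ */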
-/**
- * Page table index from address
- * Calculates the page table index from the given address
+/*
+ * The Rockchip rk3288 iommu uses a 2-level page table.
+ * The first level is the "Directory Table" (DT).
+ * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
+ * to a "Page Table".
+ * The second level consists of up to 1024 Page Tables (PTs).
+ * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
+ * a 4 KB page of physical memory.
+ *
+ * Both the DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
+ * Each iommu device has a MMU_DTE_ADDR register that contains the physical
+ * address of the start of the DT page.
+ *
+ * The structure of the page table is as follows:
+ *
+ * DT
+ * MMU_DTE_ADDR -> +-----+
+ * | |
+ * +-----+ PT
+ * | DTE | -> +-----+
+ * +-----+ | | Memory
+ * | | +-----+ Page
+ * | | | PTE | -> +-----+
+ * +-----+ +-----+ | |
+ * | | | |
+ * | | | |
+ * +-----+ | |
+ * | |
+ * | |
+ * +-----+
+ */
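+/*
+ * Worked numbers for the layout above: 1024 DTEs x 1024 PTEs x 4 KiB
+ * pages covers the full 32-bit (4 GiB) iova space, while the tables
+ * themselves cost at most one 4 KiB DT page plus 1024 4 KiB PT pages.
+ */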
-#define IOMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
-/**
- * Extract the memory address from an PDE/PTE entry
+/*
+ * Each DTE has a PT address and a valid bit:
+ * +---------------------+-----------+-+
+ * | PT address | Reserved |V|
+ * +---------------------+-----------+-+
+ * 31:12 - PT address (PTs always start on a 4 KB boundary)
+ * 11: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
-#define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
-
-#define INVALID_PAGE ((u32)(~0))
+#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
+#define RK_DTE_PT_VALID BIT(0)
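+/*
+ * Example with a made-up value: dte 0x08765001 is valid (bit 0 set)
+ * and points at a page table starting at physical address 0x08765000.
+ */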
-static struct kmem_cache *lv2table_kmem_cache;
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
-static unsigned int *rockchip_section_entry(unsigned int *pgtable, unsigned long iova)
+static inline bool rk_dte_is_pt_valid(u32 dte)
{
- return pgtable + rockchip_lv1ent_offset(iova);
+ return dte & RK_DTE_PT_VALID;
}
-static unsigned int *rockchip_page_entry(unsigned int *sent, unsigned long iova)
+static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
- return (unsigned int *)phys_to_virt(rockchip_lv2table_base(sent)) +
- rockchip_lv2ent_offset(iova);
+ return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
-struct rk_iommu_domain {
- struct list_head clients; /* list of iommu_drvdata.node */
- unsigned int *pgtable; /* lv1 page table, 4KB */
- short *lv2entcnt; /* free lv2 entry counter for each section */
- spinlock_t lock; /* lock for this structure */
- spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
-};
+/*
+ * Each PTE has a Page address, some flags and a valid bit:
+ * +---------------------+---+-------+-+
+ * | Page address |Rsv| Flags |V|
+ * +---------------------+---+-------+-+
+ * 31:12 - Page address (Pages always start on a 4 KB boundary)
+ * 11: 9 - Reserved
+ * 8: 1 - Flags
+ * 8 - Read allocate - allocate cache space on read misses
+ * 7 - Read cache - enable cache & prefetch of data
+ * 6 - Write buffer - enable delaying writes on their way to memory
+ * 5 - Write allocate - allocate cache space on write misses
+ * 4 - Write cache - different writes can be merged together
+ * 3 - Override cache attributes
+ * if 1, bits 4-8 control cache attributes
+ * if 0, the system bus defaults are used
+ * 2 - Writable
+ * 1 - Readable
+ * 0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
+#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
+#define RK_PTE_PAGE_WRITABLE BIT(2)
+#define RK_PTE_PAGE_READABLE BIT(1)
+#define RK_PTE_PAGE_VALID BIT(0)
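+/*
+ * Example with a made-up value: rk_mk_pte(0x12345000, IOMMU_READ |
+ * IOMMU_WRITE) below yields 0x12345007, i.e. the page address plus the
+ * readable (bit 1), writable (bit 2) and valid (bit 0) bits.
+ */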
-static bool rockchip_set_iommu_active(struct iommu_drvdata *data)
+static inline phys_addr_t rk_pte_page_address(u32 pte)
{
- /* return true if the IOMMU was not active previously
- and it needs to be initialized */
- return ++data->activations == 1;
+ return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}
-static bool rockchip_set_iommu_inactive(struct iommu_drvdata *data)
+static inline bool rk_pte_is_page_valid(u32 pte)
{
- /* return true if the IOMMU is needed to be disabled */
- BUG_ON(data->activations < 1);
- return --data->activations == 0;
+ return pte & RK_PTE_PAGE_VALID;
}
-static bool rockchip_is_iommu_active(struct iommu_drvdata *data)
+/* TODO: set cache flags per prot IOMMU_CACHE */
+static u32 rk_mk_pte(phys_addr_t page, int prot)
{
- return data->activations > 0;
+ u32 flags = 0;
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+ page &= RK_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | RK_PTE_PAGE_VALID;
}
-static void rockchip_iommu_disable_stall(void __iomem *base)
+static u32 rk_mk_pte_invalid(u32 pte)
{
- int i;
- u32 mmu_status;
+ return pte & ~RK_PTE_PAGE_VALID;
+}
- if (base != rk312x_vop_mmu_base) {
- mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
- } else {
- goto skip_vop_mmu_disable;
+/*
+ * rk3288 iova (IOMMU Virtual Address) format
+ * 31 22.21 12.11 0
+ * +-----------+-----------+-------------+
+ * | DTE index | PTE index | Page offset |
+ * +-----------+-----------+-------------+
+ * 31:22 - DTE index - index of DTE in DT
+ * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
+ * 11: 0 - Page offset - offset into page @ PTE.page_address
+ */
+#define RK_IOVA_DTE_MASK 0xffc00000
+#define RK_IOVA_DTE_SHIFT 22
+#define RK_IOVA_PTE_MASK 0x003ff000
+#define RK_IOVA_PTE_SHIFT 12
+#define RK_IOVA_PAGE_MASK 0x00000fff
+#define RK_IOVA_PAGE_SHIFT 0
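+/*
+ * Example with a made-up value: iova 0x12345678 decomposes as
+ *	dte index   = 0x12345678 >> 22           = 0x048
+ *	pte index   = (0x12345678 >> 12) & 0x3ff = 0x345
+ *	page offset = 0x12345678 & 0xfff         = 0x678
+ */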
+
+static void rk_iommu_power_on(struct rk_iommu *iommu)
+{
+ if (iommu->aclk && iommu->hclk) {
+ clk_enable(iommu->aclk);
+ clk_enable(iommu->hclk);
}
- if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) {
- return;
- }
+ pm_runtime_enable(iommu->dev);
+ pm_runtime_get_sync(iommu->dev);
+}
- if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
- pr_info("Aborting MMU disable stall request since it is in pagefault state.\n");
- return;
- }
+static void rk_iommu_power_off(struct rk_iommu *iommu)
+{
+ pm_runtime_put_sync(iommu->dev);
+ pm_runtime_disable(iommu->dev);
- if (!(mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE)) {
- return;
+ if (iommu->aclk && iommu->hclk) {
+ clk_disable(iommu->aclk);
+ clk_disable(iommu->hclk);
}
+}
- __raw_writel(IOMMU_COMMAND_DISABLE_STALL, base + IOMMU_REGISTER_COMMAND);
+static u32 rk_iova_dte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
+}
- skip_vop_mmu_disable:
+static u32 rk_iova_pte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
+}
- for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
- u32 status;
-
- if (base != rk312x_vop_mmu_base) {
- status = __raw_readl(base + IOMMU_REGISTER_STATUS);
- } else {
- int j;
- while (j < 5)
- j++;
- return;
- }
+static u32 rk_iova_page_offset(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
+}
- if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))
- break;
+static u32 rk_iommu_read(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
- if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
- break;
+static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
- if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
- break;
- }
+static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
+{
+ int i;
- if (IOMMU_REG_POLL_COUNT_FAST == i) {
- pr_info("Disable stall request failed, MMU status is 0x%08X\n",
- __raw_readl(base + IOMMU_REGISTER_STATUS));
- }
+ for (i = 0; i < iommu->num_mmu; i++)
+ writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}
-static bool rockchip_iommu_enable_stall(void __iomem *base)
+static void rk_iommu_base_command(void __iomem *base, u32 command)
+{
+ writel(command, base + RK_MMU_COMMAND);
+}
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
+ size_t size)
{
int i;
+ dma_addr_t iova_end = iova_start + size;
+ /*
+ * TODO(djkurtz): Figure out when it is more efficient to shoot down the
+ * entire iotlb rather than iterate over individual iovas.
+ */
- u32 mmu_status;
-
- if (base != rk312x_vop_mmu_base) {
- mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
- } else {
- goto skip_vop_mmu_enable;
- }
-
- if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) {
- return true;
- }
+ rk_iommu_power_on(iommu);
- if (mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE){
- pr_info("MMU stall already enabled\n");
- return true;
- }
+ for (i = 0; i < iommu->num_mmu; i++) {
+ dma_addr_t iova;
- if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
- pr_info("Aborting MMU stall request since it is in pagefault state. mmu status is 0x%08x\n",
- mmu_status);
- return false;
+ for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
+ rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
}
- __raw_writel(IOMMU_COMMAND_ENABLE_STALL, base + IOMMU_REGISTER_COMMAND);
-
- skip_vop_mmu_enable:
+ rk_iommu_power_off(iommu);
+}
- for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
- if (base != rk312x_vop_mmu_base) {
- mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
- } else {
- int j;
- while (j < 5)
- j++;
- return true;
- }
+static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
+{
+ bool active = true;
+ int i;
- if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
- break;
+ for (i = 0; i < iommu->num_mmu; i++)
+ active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_STALL_ACTIVE);
- if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
- (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))
- break;
-
- if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))
- break;
- }
+ return active;
+}
- if (IOMMU_REG_POLL_COUNT_FAST == i) {
- pr_info("Enable stall request failed, MMU status is 0x%08X\n",
- __raw_readl(base + IOMMU_REGISTER_STATUS));
- return false;
- }
+static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
+{
+ bool enable = true;
+ int i;
- if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
- pr_info("Aborting MMU stall request since it has a pagefault.\n");
- return false;
- }
+ for (i = 0; i < iommu->num_mmu; i++)
+ enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED);
- return true;
+ return enable;
}
-static bool rockchip_iommu_enable_paging(void __iomem *base)
+static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
- int i;
+ int ret, i;
- __raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
- base + IOMMU_REGISTER_COMMAND);
+ if (rk_iommu_is_stall_active(iommu))
+ return 0;
- for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
- if (base != rk312x_vop_mmu_base) {
- if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
- IOMMU_STATUS_BIT_PAGING_ENABLED)
- break;
- } else {
- int j;
- while (j < 5)
- j++;
- return true;
- }
- }
+ /* Stall can only be enabled if paging is enabled */
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
- if (IOMMU_REG_POLL_COUNT_FAST == i) {
- pr_info("Enable paging request failed, MMU status is 0x%08X\n",
- __raw_readl(base + IOMMU_REGISTER_STATUS));
- return false;
- }
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
+
+ ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
- return true;
+ return ret;
}
-static bool rockchip_iommu_disable_paging(void __iomem *base)
+static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
- int i;
+ int ret, i;
- __raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
- base + IOMMU_REGISTER_COMMAND);
-
- for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
- if (base != rk312x_vop_mmu_base) {
- if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
- IOMMU_STATUS_BIT_PAGING_ENABLED))
- break;
- } else {
- int j;
- while (j < 5)
- j++;
- return true;
- }
- }
+ if (!rk_iommu_is_stall_active(iommu))
+ return 0;
- if (IOMMU_REG_POLL_COUNT_FAST == i) {
- pr_info("Disable paging request failed, MMU status is 0x%08X\n",
- __raw_readl(base + IOMMU_REGISTER_STATUS));
- return false;
- }
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
- return true;
-}
+ ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
-static void rockchip_iommu_page_fault_done(void __iomem *base, const char *dbgname)
-{
- pr_info("MMU: %s: Leaving page fault mode\n",
- dbgname);
- __raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
- base + IOMMU_REGISTER_COMMAND);
+ return ret;
}
-static int rockchip_iommu_zap_tlb_without_stall (void __iomem *base)
+static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
- __raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);
-
- return 0;
-}
+ int ret, i;
-static int rockchip_iommu_zap_tlb(void __iomem *base)
-{
- if (!rockchip_iommu_enable_stall(base)) {
- pr_err("%s failed\n", __func__);
- return -1;
- }
+ if (rk_iommu_is_paging_enabled(iommu))
+ return 0;
- __raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
- rockchip_iommu_disable_stall(base);
+ ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
- return 0;
+ return ret;
}
-static inline bool rockchip_iommu_raw_reset(void __iomem *base)
+static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
- int i;
- unsigned int ret;
- unsigned int grf_value;
+ int ret, i;
- __raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
- if (base != rk312x_vop_mmu_base) {
- ret = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
- if (!(0xCAFEB000 == ret)) {
- grf_value = readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
- pr_info("error when %s. grf = 0x%08x\n", __func__, grf_value);
- return false;
- }
- }
- __raw_writel(IOMMU_COMMAND_HARD_RESET,
- base + IOMMU_REGISTER_COMMAND);
-
- for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
- if (base != rk312x_vop_mmu_base) {
- if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)
- break;
- } else {
- int j;
- while (j < 5)
- j++;
- return true;
- }
- }
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
- if (IOMMU_REG_POLL_COUNT_FAST == i) {
- pr_info("%s,Reset request failed, MMU status is 0x%08X\n",
- __func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));
- return false;
- }
- return true;
-}
+ ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
-static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned int pgd)
-{
- __raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);
+ return ret;
}
-static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname)
+static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
- bool ret = true;
-
- ret = rockchip_iommu_raw_reset(base);
- if (!ret) {
- pr_info("(%s), %s failed\n", dbgname, __func__);
- return ret;
+ int ret, i;
+ u32 dte_addr;
+
+ /* Workaround for isp mmus */
+ if (iommu->reset_disabled)
+ return 0;
+
+ /*
+ * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+ * and verifying that the upper 5 nybbles are read back.
+ */
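+ /*
+ * (DTE_ADDR_DUMMY is 0xCAFEBABE; masked by RK_DTE_PT_ADDRESS_MASK it
+ * reads back as 0xCAFEB000, i.e. the upper 5 nybbles.)
+ */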
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+ dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
}
- if (base != rk312x_vop_mmu_base)
- __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
- IOMMU_INTERRUPT_READ_BUS_ERROR,
- base + IOMMU_REGISTER_INT_MASK);
- else
- __raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);
+ rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
- return ret;
-}
+ for (i = 0; i < iommu->num_mmu; i++) {
+ ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret) {
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ return ret;
+ }
+ }
-static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
-{
-#ifdef CONFIG_ARM
- dmac_flush_range(vastart, vaend);
- outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
-#elif defined(CONFIG_ARM64)
- __dma_flush_range(vastart, vaend);
- //flush_cache_all();
-#endif
+ return 0;
}
-static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
+static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
+ void __iomem *base = iommu->bases[index];
u32 dte_index, pte_index, page_offset;
u32 mmu_dte_addr;
phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
phys_addr_t page_addr_phys = 0;
u32 page_flags = 0;
- dte_index = rockchip_lv1ent_offset(fault_address);
- pte_index = rockchip_lv2ent_offset(fault_address);
- page_offset = (u32)(fault_address & 0x00000fff);
+ dte_index = rk_iova_dte_index(iova);
+ pte_index = rk_iova_pte_index(iova);
+ page_offset = rk_iova_page_offset(iova);
- mmu_dte_addr = addr_dte;
+ mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
dte = *dte_addr;
- if (!(IOMMU_FLAGS_PRESENT & dte))
+ if (!rk_dte_is_pt_valid(dte))
goto print_it;
- pte_addr_phys = ((phys_addr_t)dte & 0xfffff000) + (pte_index * 4);
+ pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
pte_addr = phys_to_virt(pte_addr_phys);
pte = *pte_addr;
- if (!(IOMMU_FLAGS_PRESENT & pte))
+ if (!rk_pte_is_page_valid(pte))
goto print_it;
- page_addr_phys = ((phys_addr_t)pte & 0xfffff000) + page_offset;
- page_flags = pte & 0x000001fe;
+ page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
print_it:
- pr_err("iova = %pad: dte_index: 0x%03x pte_index: 0x%03x page_offset: 0x%03x\n",
- &fault_address, dte_index, pte_index, page_offset);
- pr_err("mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+ dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
+ &iova, dte_index, pte_index, page_offset);
+ dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
&mmu_dte_addr_phys, &dte_addr_phys, dte,
- (dte & IOMMU_FLAGS_PRESENT), &pte_addr_phys, pte,
- (pte & IOMMU_FLAGS_PRESENT), &page_addr_phys, page_flags);
+ rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
+ rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
-static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
- /* SYSMMU is in blocked when interrupt occurred. */
- struct iommu_drvdata *data = dev_id;
+ struct rk_iommu *iommu = dev_id;
u32 status;
- u32 rawstat;
- dma_addr_t fault_address;
+ u32 int_status;
+ dma_addr_t iova;
+ irqreturn_t ret = IRQ_NONE;
int i;
- unsigned long flags;
- int ret;
- u32 reg_status;
-
- spin_lock_irqsave(&data->data_lock, flags);
-
- if (!rockchip_is_iommu_active(data)) {
- spin_unlock_irqrestore(&data->data_lock, flags);
- return IRQ_HANDLED;
- }
- for (i = 0; i < data->num_res_mem; i++) {
- status = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_INT_STATUS);
- if (status == 0)
+ for (i = 0; i < iommu->num_mmu; i++) {
+ int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
+ if (int_status == 0)
continue;
- rawstat = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_INT_RAWSTAT);
-
- reg_status = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_STATUS);
-
- dev_info(data->iommu, "1.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
- rawstat, status, reg_status);
+ ret = IRQ_HANDLED;
+ iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
- if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
- u32 dte;
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
int flags;
- fault_address = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_PAGE_FAULT_ADDR);
-
- dte = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_DTE_ADDR);
-
- flags = (status & 32) ? 1 : 0;
+ status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+
+ log_iova(iommu, i, iova);
+
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
- dev_err(data->iommu, "Page fault detected at %pad from bus id %d of type %s on %s\n",
- &fault_address, (status >> 6) & 0x1F,
- (flags == 1) ? "write" : "read", data->dbgname);
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
- dump_pagetbl(fault_address, dte);
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
- if (data->domain)
- report_iommu_fault(data->domain, data->iommu,
- fault_address, flags);
- if (data->fault_handler)
- data->fault_handler(data->iommu, IOMMU_PAGEFAULT, dte, fault_address, 1);
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+ }
- rockchip_iommu_page_fault_done(data->res_bases[i],
- data->dbgname);
- }
+ return ret;
+}
- if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
- dev_err(data->iommu, "bus error occured at %pad\n",
- &fault_address);
- }
+static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ phys_addr_t pt_phys, phys = 0;
+ u32 dte, pte;
+ u32 *page_table;
- if (rawstat & ~(IOMMU_INTERRUPT_READ_BUS_ERROR |
- IOMMU_INTERRUPT_PAGE_FAULT)) {
- dev_err(data->iommu, "unexpected int_status: %#08x\n\n",
- rawstat);
- }
+ mutex_lock(&rk_domain->dt_lock);
- __raw_writel(rawstat, data->res_bases[i] +
- IOMMU_REGISTER_INT_CLEAR);
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ if (!rk_dte_is_pt_valid(dte))
+ goto out;
- status = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_INT_STATUS);
+ pt_phys = rk_dte_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[rk_iova_pte_index(iova)];
+ if (!rk_pte_is_page_valid(pte))
+ goto out;
- rawstat = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_INT_RAWSTAT);
+ phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+out:
+ mutex_unlock(&rk_domain->dt_lock);
- reg_status = __raw_readl(data->res_bases[i] +
- IOMMU_REGISTER_STATUS);
+ return phys;
+}
- dev_info(data->iommu, "2.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
- rawstat, status, reg_status);
+static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova, size_t size)
+{
+ struct list_head *pos;
- ret = rockchip_iommu_zap_tlb_without_stall(data->res_bases[i]);
- if (ret)
- dev_err(data->iommu, "(%s) %s failed\n", data->dbgname,
- __func__);
+ /* Shoot down these iovas on all iommus that use this domain */
+ mutex_lock(&rk_domain->iommus_lock);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
+ iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_zap_lines(iommu, iova, size);
}
-
- spin_unlock_irqrestore(&data->data_lock, flags);
- return IRQ_HANDLED;
+ mutex_unlock(&rk_domain->iommus_lock);
}
-static bool rockchip_iommu_disable(struct iommu_drvdata *data)
+static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova, size_t size)
{
- unsigned long flags;
- int i;
- bool ret = false;
+ rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+ if (size > SPAGE_SIZE)
+ rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
+ SPAGE_SIZE);
+}
- spin_lock_irqsave(&data->data_lock, flags);
+static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova)
+{
+ struct device *dev = &rk_domain->pdev->dev;
+ u32 *page_table, *dte_addr;
+ u32 dte_index, dte;
+ phys_addr_t pt_phys;
+ dma_addr_t pt_dma;
- if (!rockchip_set_iommu_inactive(data)) {
- spin_unlock_irqrestore(&data->data_lock, flags);
- dev_info(data->iommu,"(%s) %d times left to be disabled\n",
- data->dbgname, data->activations);
- return ret;
- }
+ WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
- for (i = 0; i < data->num_res_mem; i++) {
- ret = rockchip_iommu_enable_stall(data->res_bases[i]);
- if (!ret) {
- dev_info(data->iommu, "(%s), %s failed\n",
- data->dbgname, __func__);
- spin_unlock_irqrestore(&data->data_lock, flags);
- return false;
- }
+ dte_index = rk_iova_dte_index(iova);
+ dte_addr = &rk_domain->dt[dte_index];
+ dte = *dte_addr;
+ if (rk_dte_is_pt_valid(dte))
+ goto done;
- __raw_writel(0, data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
- ret = rockchip_iommu_disable_paging(data->res_bases[i]);
- if (!ret) {
- rockchip_iommu_disable_stall(data->res_bases[i]);
- spin_unlock_irqrestore(&data->data_lock, flags);
- dev_info(data->iommu, "%s error\n", __func__);
- return ret;
- }
- rockchip_iommu_disable_stall(data->res_bases[i]);
+ pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pt_dma)) {
+ dev_err(dev, "DMA mapping error while allocating page table\n");
+ free_page((unsigned long)page_table);
+ return ERR_PTR(-ENOMEM);
}
- data->pgtable = 0;
-
- spin_unlock_irqrestore(&data->data_lock, flags);
-
- dev_dbg(data->iommu,"(%s) Disabled\n", data->dbgname);
+ dte = rk_mk_dte(pt_dma);
+ *dte_addr = dte;
- return ret;
+ rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
+ rk_table_flush(rk_domain,
+ rk_domain->dt_dma + dte_index * sizeof(u32), 1);
+done:
+ pt_phys = rk_dte_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
}
-/* __rk_sysmmu_enable: Enables System MMU
- *
- * returns -error if an error occurred and System MMU is not enabled,
- * 0 if the System MMU has been just enabled and 1 if System MMU was already
- * enabled before.
- */
-static int rockchip_iommu_enable(struct iommu_drvdata *data, unsigned int pgtable)
+static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
+ u32 *pte_addr, dma_addr_t pte_dma,
+ size_t size)
{
- int i, ret = 0;
- unsigned long flags;
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
- spin_lock_irqsave(&data->data_lock, flags);
+ WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
- if (!rockchip_set_iommu_active(data)) {
- if (WARN_ON(pgtable != data->pgtable)) {
- ret = -EBUSY;
- rockchip_set_iommu_inactive(data);
- } else {
- ret = 1;
- }
-
- spin_unlock_irqrestore(&data->data_lock, flags);
- dev_info(data->iommu, "(%s) Already enabled\n", data->dbgname);
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+ if (!rk_pte_is_page_valid(pte))
+ break;
- return ret;
+ pte_addr[pte_count] = rk_mk_pte_invalid(pte);
}
- for (i = 0; i < data->num_res_mem; i++) {
- ret = rockchip_iommu_enable_stall(data->res_bases[i]);
- if (!ret) {
- dev_info(data->iommu, "(%s), %s failed\n",
- data->dbgname, __func__);
- spin_unlock_irqrestore(&data->data_lock, flags);
- return -EBUSY;
- }
+ rk_table_flush(rk_domain, pte_dma, pte_count);
- if (!strstr(data->dbgname, "isp")) {
- if (!rockchip_iommu_reset(data->res_bases[i],
- data->dbgname)) {
- spin_unlock_irqrestore(&data->data_lock, flags);
- return -ENOENT;
- }
- }
+ return pte_count * SPAGE_SIZE;
+}
- rockchip_iommu_set_ptbase(data->res_bases[i], pgtable);
+static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
+ dma_addr_t pte_dma, dma_addr_t iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+ phys_addr_t page_phys;
- __raw_writel(IOMMU_COMMAND_ZAP_CACHE, data->res_bases[i] +
- IOMMU_REGISTER_COMMAND);
+ WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
- if (strstr(data->dbgname, "isp")) {
- __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
- IOMMU_INTERRUPT_READ_BUS_ERROR,
- data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
- }
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
- ret = rockchip_iommu_enable_paging(data->res_bases[i]);
- if (!ret) {
- spin_unlock_irqrestore(&data->data_lock, flags);
- dev_info(data->iommu, "(%s), %s failed\n",
- data->dbgname, __func__);
- return -EBUSY;
- }
+ if (rk_pte_is_page_valid(pte))
+ goto unwind;
- rockchip_iommu_disable_stall(data->res_bases[i]);
- }
+ pte_addr[pte_count] = rk_mk_pte(paddr, prot);
- data->pgtable = pgtable;
+ paddr += SPAGE_SIZE;
+ }
- dev_dbg(data->iommu,"(%s) Enabled\n", data->dbgname);
+ rk_table_flush(rk_domain, pte_dma, pte_total);
- spin_unlock_irqrestore(&data->data_lock, flags);
+ /*
+ * Zap the first and last iova to evict from iotlb any previously
+ * mapped cachelines holding stale values for their dte and pte.
+ * We only zap the first and last iova, since only they could have
+ * dte or pte shared with an existing mapping.
+ */
+ rk_iommu_zap_iova_first_last(rk_domain, iova, size);
return 0;
+unwind:
+ /* Unmap the range of iovas that we just mapped */
+ rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
+ pte_count * SPAGE_SIZE);
+
+ iova += pte_count * SPAGE_SIZE;
+ page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+
+ return -EADDRINUSE;
}
-int rockchip_iommu_tlb_invalidate_global(struct device *dev)
+static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
+ phys_addr_t paddr, size_t size, int prot)
{
- unsigned long flags;
- struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
+ u32 *page_table, *pte_addr;
+ u32 dte, pte_index;
int ret;
- spin_lock_irqsave(&data->data_lock, flags);
+ mutex_lock(&rk_domain->dt_lock);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So, size will always be 4096 <= size <= 4194304.
+ * Since iommu_map() guarantees that both iova and size will be
+ * aligned, we will always only be mapping from a single dte here.
+ */
+ page_table = rk_dte_get_page_table(rk_domain, iova);
+ if (IS_ERR(page_table)) {
+ mutex_unlock(&rk_domain->dt_lock);
+ return PTR_ERR(page_table);
+ }
- if (rockchip_is_iommu_active(data)) {
- int i;
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ pte_index = rk_iova_pte_index(iova);
+ pte_addr = &page_table[pte_index];
+ pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
+ paddr, size, prot);
- for (i = 0; i < data->num_res_mem; i++) {
- ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
- if (ret)
- dev_err(dev->archdata.iommu, "(%s) %s failed\n",
- data->dbgname, __func__);
- }
- } else {
- dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",
- data->dbgname);
- ret = -1;
+ mutex_unlock(&rk_domain->dt_lock);
+
+ return ret;
+}
+
+static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
+ size_t size)
+{
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
+ phys_addr_t pt_phys;
+ u32 dte;
+ u32 *pte_addr;
+ size_t unmap_size;
+
+ mutex_lock(&rk_domain->dt_lock);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So, size will always be 4096 <= size <= 4194304.
+ * Since iommu_unmap() guarantees that both iova and size will be
+ * aligned, we will always only be unmapping from a single dte here.
+ */
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ /* Just return 0 if iova is unmapped */
+ if (!rk_dte_is_pt_valid(dte)) {
+ mutex_unlock(&rk_domain->dt_lock);
+ return 0;
}
- spin_unlock_irqrestore(&data->data_lock, flags);
+ pt_phys = rk_dte_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
+ pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
- return ret;
+ mutex_unlock(&rk_domain->dt_lock);
+
+ /* Shoot down iotlb entries for the iova range that was just unmapped */
+ rk_iommu_zap_iova(rk_domain, iova, unmap_size);
+
+ return unmap_size;
}
-int rockchip_iommu_tlb_invalidate(struct device *dev)
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- unsigned long flags;
- struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-
- if (strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc"))
- return 0;
-
- spin_lock_irqsave(&data->data_lock, flags);
-
- if (rockchip_is_iommu_active(data)) {
- int i;
- int ret;
-
- for (i = 0; i < data->num_res_mem; i++) {
- ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
- if (ret) {
- dev_err(dev->archdata.iommu, "(%s) %s failed\n",
- data->dbgname, __func__);
- spin_unlock_irqrestore(&data->data_lock, flags);
- return ret;
- }
-
- }
- } else {
- dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",
- data->dbgname);
+ struct iommu_group *group;
+ struct device *iommu_dev;
+ struct rk_iommu *rk_iommu;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+ iommu_dev = iommu_group_get_iommudata(group);
+ if (!iommu_dev) {
+ dev_info(dev, "Possibly a virtual device\n");
+ iommu_group_put(group);
+ return NULL;
}
- spin_unlock_irqrestore(&data->data_lock, flags);
+ rk_iommu = dev_get_drvdata(iommu_dev);
+ iommu_group_put(group);
- return 0;
+ return rk_iommu;
}
-static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
{
- struct rk_iommu_domain *priv = domain->priv;
- unsigned int *entry;
- unsigned long flags;
- phys_addr_t phys = 0;
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ int ret, i;
- spin_lock_irqsave(&priv->pgtablelock, flags);
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
- entry = rockchip_section_entry(priv->pgtable, iova);
- entry = rockchip_page_entry(entry, iova);
- phys = rockchip_spage_phys(entry) + rockchip_spage_offs(iova);
+ rk_iommu_power_on(iommu);
- spin_unlock_irqrestore(&priv->pgtablelock, flags);
+ ret = rk_iommu_enable_stall(iommu);
+ if (ret)
+ return ret;
- return phys;
-}
+ ret = rk_iommu_force_reset(iommu);
+ if (ret)
+ return ret;
-static int rockchip_lv2set_page(unsigned int *pent, phys_addr_t paddr,
- size_t size, short *pgcnt)
-{
- if (!rockchip_lv2ent_fault(pent))
- return -EADDRINUSE;
+ iommu->domain = domain;
- *pent = rockchip_mk_lv2ent_spage(paddr);
- rockchip_pgtable_flush(pent, pent + 1);
- *pgcnt -= 1;
- return 0;
-}
+ for (i = 0; i < iommu->num_irq; i++) {
+ ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (ret)
+ return ret;
+ }
-static unsigned int *rockchip_alloc_lv2entry(unsigned int *sent,
- unsigned long iova, short *pgcounter)
-{
- if (rockchip_lv1ent_fault(sent)) {
- unsigned int *pent;
-
- pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
- BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
- if (!pent)
- return NULL;
-
- *sent = rockchip_mk_lv1ent_page(virt_to_phys(pent));
- kmemleak_ignore(pent);
- *pgcounter = NUM_LV2ENTRIES;
- rockchip_pgtable_flush(pent, pent + NUM_LV2ENTRIES);
- rockchip_pgtable_flush(sent, sent + 1);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
+ rk_domain->dt_dma);
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
- return rockchip_page_entry(sent, iova);
-}
-static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
-{
- struct rk_iommu_domain *priv = domain->priv;
- unsigned long flags;
- unsigned int *ent;
+ ret = rk_iommu_enable_paging(iommu);
+ if (ret)
+ return ret;
- BUG_ON(priv->pgtable == NULL);
+ mutex_lock(&rk_domain->iommus_lock);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ mutex_unlock(&rk_domain->iommus_lock);
- spin_lock_irqsave(&priv->pgtablelock, flags);
+ dev_dbg(dev, "Attached to iommu domain\n");
- ent = rockchip_section_entry(priv->pgtable, iova);
+ rk_iommu_disable_stall(iommu);
- if (unlikely(rockchip_lv1ent_fault(ent))) {
- if (size > SPAGE_SIZE)
- size = SPAGE_SIZE;
- goto done;
- }
+ return 0;
+}
- /* lv1ent_page(sent) == true here */
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ int i;
- ent = rockchip_page_entry(ent, iova);
+ /* Allow 'virtual devices' (e.g., drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
- if (unlikely(rockchip_lv2ent_fault(ent))) {
- size = SPAGE_SIZE;
- goto done;
+ mutex_lock(&rk_domain->iommus_lock);
+ list_del_init(&iommu->node);
+ mutex_unlock(&rk_domain->iommus_lock);
+
+ /* Ignore error while disabling, just keep going */
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
}
+ rk_iommu_disable_stall(iommu);
- *ent = 0;
- size = SPAGE_SIZE;
- priv->lv2entcnt[rockchip_lv1ent_offset(iova)] += 1;
- goto done;
+ for (i = 0; i < iommu->num_irq; i++) {
+ devm_free_irq(iommu->dev, iommu->irq[i], iommu);
+ }
-done:
- pr_debug("%s:unmap iova 0x%lx/%zx bytes\n",
- __func__, iova,size);
- spin_unlock_irqrestore(&priv->pgtablelock, flags);
+ iommu->domain = NULL;
- return size;
+ rk_iommu_power_off(iommu);
+
+ dev_dbg(dev, "Detached from iommu domain\n");
}
-static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
- struct rk_iommu_domain *priv = domain->priv;
- unsigned int *entry;
- unsigned long flags;
- int ret = -ENOMEM;
- unsigned int *pent;
+ struct rk_iommu_domain *rk_domain;
+ struct platform_device *pdev;
+ struct device *iommu_dev;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ return NULL;
+
+ /*
+ * Register a pdev per domain, so the DMA API has a *dev to work
+ * with even when a virtual master has no iommu slave.
+ */
+ pdev = platform_device_register_simple("rk_iommu_domain",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(pdev))
+ return NULL;
+
+ rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ goto err_unreg_pdev;
+
+ rk_domain->pdev = pdev;
+
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&rk_domain->domain))
+ goto err_unreg_pdev;
+
+ /*
+ * rk32xx iommus use a 2-level pagetable.
+ * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
+ * Allocate one 4 KiB page for each table.
+ */
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ if (!rk_domain->dt)
+ goto err_put_cookie;
+
+ iommu_dev = &pdev->dev;
+ rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
+ SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
+ dev_err(iommu_dev, "DMA map error for DT\n");
+ goto err_free_dt;
+ }
- BUG_ON(priv->pgtable == NULL);
+ rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
- spin_lock_irqsave(&priv->pgtablelock, flags);
+ mutex_init(&rk_domain->iommus_lock);
+ mutex_init(&rk_domain->dt_lock);
+ INIT_LIST_HEAD(&rk_domain->iommus);
- entry = rockchip_section_entry(priv->pgtable, iova);
+ rk_domain->domain.geometry.aperture_start = 0;
+ rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ rk_domain->domain.geometry.force_aperture = true;
- pent = rockchip_alloc_lv2entry(entry, iova,
- &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);
- if (!pent)
- ret = -ENOMEM;
- else
- ret = rockchip_lv2set_page(pent, paddr, size,
- &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);
+ return &rk_domain->domain;
- if (ret) {
- pr_info("%s: Failed to map iova 0x%lx/%zx bytes\n", __func__,
- iova, size);
- }
- spin_unlock_irqrestore(&priv->pgtablelock, flags);
+err_free_dt:
+ free_page((unsigned long)rk_domain->dt);
+err_put_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&rk_domain->domain);
+err_unreg_pdev:
+ platform_device_unregister(pdev);
- return ret;
+ return NULL;
}
-static void rockchip_iommu_detach_device(struct iommu_domain *domain, struct device *dev)
+static void rk_iommu_domain_free(struct iommu_domain *domain)
{
- struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- struct rk_iommu_domain *priv = domain->priv;
- struct list_head *pos;
- unsigned long flags;
- bool found = false;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ int i;
- spin_lock_irqsave(&priv->lock, flags);
+ WARN_ON(!list_empty(&rk_domain->iommus));
- list_for_each(pos, &priv->clients) {
- if (list_entry(pos, struct iommu_drvdata, node) == data) {
- found = true;
- break;
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ u32 dte = rk_domain->dt[i];
+ if (rk_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ u32 *page_table = phys_to_virt(pt_phys);
+ dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
+ SPAGE_SIZE, DMA_TO_DEVICE);
+ free_page((unsigned long)page_table);
}
}
- if (!found) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- if (rockchip_iommu_disable(data)) {
- if (!(strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc")))
- dev_dbg(dev->archdata.iommu,"%s: Detached IOMMU with pgtable %08lx\n",
- __func__, (unsigned long)virt_to_phys(priv->pgtable));
- data->domain = NULL;
- list_del_init(&data->node);
+ dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
+ SPAGE_SIZE, DMA_TO_DEVICE);
+ free_page((unsigned long)rk_domain->dt);
- } else
- dev_err(dev->archdata.iommu,"%s: Detaching IOMMU with pgtable %08lx delayed",
- __func__, (unsigned long)virt_to_phys(priv->pgtable));
+ if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&rk_domain->domain);
- spin_unlock_irqrestore(&priv->lock, flags);
+ platform_device_unregister(rk_domain->pdev);
}
-static int rockchip_iommu_attach_device(struct iommu_domain *domain, struct device *dev)
+static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
- struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- struct rk_iommu_domain *priv = domain->priv;
- unsigned long flags;
+ struct device_node *np = dev->of_node;
int ret;
- spin_lock_irqsave(&priv->lock, flags);
-
- ret = rockchip_iommu_enable(data, virt_to_phys(priv->pgtable));
-
- if (ret == 0) {
- /* 'data->node' must not be appeared in priv->clients */
- BUG_ON(!list_empty(&data->node));
- list_add_tail(&data->node, &priv->clients);
- data->domain = domain;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (ret < 0) {
- dev_err(dev->archdata.iommu,"%s: Failed to attach IOMMU with pgtable %x\n",
- __func__, (unsigned int)virt_to_phys(priv->pgtable));
- } else if (ret > 0) {
- dev_dbg(dev->archdata.iommu,"%s: IOMMU with pgtable 0x%x already attached\n",
- __func__, (unsigned int)virt_to_phys(priv->pgtable));
- } else {
- if (!(strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc")))
- dev_info(dev->archdata.iommu,"%s: Attached new IOMMU with pgtable 0x%x\n",
- __func__, (unsigned int)virt_to_phys(priv->pgtable));
- }
-
- return ret;
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
+ return (ret > 0);
}
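+/*
+ * Illustrative device tree fragment for the binding described above
+ * (node names and addresses are made up):
+ *
+ *	vopb_mmu: iommu@ff930300 {
+ *		compatible = "rockchip,iommu";
+ *		#iommu-cells = <0>;
+ *	};
+ *
+ *	vopb: vop@ff930000 {
+ *		iommus = <&vopb_mmu>;
+ *	};
+ */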
-static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
+static int rk_iommu_group_set_iommudata(struct iommu_group *group,
+ struct device *dev)
{
- struct rk_iommu_domain *priv = domain->priv;
- int i;
+ struct device_node *np = dev->of_node;
+ struct platform_device *pd;
+ int ret;
+ struct of_phandle_args args;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
+ &args);
+ if (ret) {
+ dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
+ np->full_name, ret);
+ return ret;
+ }
+ if (args.args_count != 0) {
+ dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
+ args.np->full_name, args.args_count);
+ return -EINVAL;
+ }
- WARN_ON(!list_empty(&priv->clients));
+ pd = of_find_device_by_node(args.np);
+ if (!pd) {
+ dev_err(dev, "iommu %s not found\n", args.np->full_name);
+ of_node_put(args.np);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(args.np);
- for (i = 0; i < NUM_LV1ENTRIES; i++)
- if (rockchip_lv1ent_page(priv->pgtable + i))
- kmem_cache_free(lv2table_kmem_cache,
- phys_to_virt(rockchip_lv2table_base(priv->pgtable + i)));
+ /* TODO(djkurtz): handle multiple slave iommus for a single master */
+ iommu_group_set_iommudata(group, &pd->dev, NULL);
- free_pages((unsigned long)priv->pgtable, 0);
- free_pages((unsigned long)priv->lv2entcnt, 0);
- kfree(domain->priv);
- domain->priv = NULL;
+ return 0;
}
-static int rockchip_iommu_domain_init(struct iommu_domain *domain)
+static int rk_iommu_add_device(struct device *dev)
{
- struct rk_iommu_domain *priv;
+ struct iommu_group *group;
+ int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return -ENODEV;
-/*rk32xx iommu use 2 level pagetable,
- level1 and leve2 both have 1024 entries,each entry occupy 4 bytes,
- so alloc a page size for each page table
-*/
- priv->pgtable = (unsigned int *)__get_free_pages(GFP_KERNEL |
- __GFP_ZERO, 0);
- if (!priv->pgtable)
- goto err_pgtable;
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
- priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |
- __GFP_ZERO, 0);
- if (!priv->lv2entcnt)
- goto err_counter;
+ ret = iommu_group_add_device(group, dev);
+ if (ret)
+ goto err_put_group;
- rockchip_pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+ ret = rk_iommu_group_set_iommudata(group, dev);
+ if (ret)
+ goto err_remove_device;
- spin_lock_init(&priv->lock);
- spin_lock_init(&priv->pgtablelock);
- INIT_LIST_HEAD(&priv->clients);
+ iommu_group_put(group);
- domain->priv = priv;
return 0;
-err_counter:
- free_pages((unsigned long)priv->pgtable, 0);
-err_pgtable:
- kfree(priv);
- return -ENOMEM;
+err_remove_device:
+ iommu_group_remove_device(dev);
+err_put_group:
+ iommu_group_put(group);
+ return ret;
}
-static struct iommu_ops rk_iommu_ops = {
- .domain_init = rockchip_iommu_domain_init,
- .domain_destroy = rockchip_iommu_domain_destroy,
- .attach_dev = rockchip_iommu_attach_device,
- .detach_dev = rockchip_iommu_detach_device,
- .map = rockchip_iommu_map,
- .unmap = rockchip_iommu_unmap,
- .iova_to_phys = rockchip_iommu_iova_to_phys,
- .pgsize_bitmap = SPAGE_SIZE,
-};
-
-static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
- unsigned int type)
+static void rk_iommu_remove_device(struct device *dev)
{
- int num = 0;
- int i;
-
- for (i = 0; i < pdev->num_resources; i++) {
- struct resource *r = &pdev->resource[i];
- if (type == resource_type(r))
- num++;
- }
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return;
- return num;
+ iommu_group_remove_device(dev);
}
-static int rockchip_iommu_probe(struct platform_device *pdev)
+static const struct iommu_ops rk_iommu_ops = {
+ .domain_alloc = rk_iommu_domain_alloc,
+ .domain_free = rk_iommu_domain_free,
+ .attach_dev = rk_iommu_attach_device,
+ .detach_dev = rk_iommu_detach_device,
+ .map = rk_iommu_map,
+ .unmap = rk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .add_device = rk_iommu_add_device,
+ .remove_device = rk_iommu_remove_device,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+};
+
+static int rk_iommu_domain_probe(struct platform_device *pdev)
{
- int i, ret;
- struct device *dev;
- struct iommu_drvdata *data;
-
- dev = &pdev->dev;
+ struct device *dev = &pdev->dev;
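+	/*
+	 * This device has no MMIO of its own; it is only used to provide a
+	 * DMA-capable struct device (e.g. for page-table allocations).
+	 */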
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_dbg(dev, "Not enough memory\n");
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
return -ENOMEM;
- }
- dev_set_drvdata(dev, data);
+ /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
+ arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
- if (pdev->dev.of_node)
- of_property_read_string(pdev->dev.of_node, "dbgname",
- &(data->dbgname));
- else
- dev_dbg(dev, "dbgname not assigned in device tree or device node not exist\r\n");
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
- dev_info(dev,"(%s) Enter\n", data->dbgname);
-
- data->num_res_mem = rockchip_get_iommu_resource_num(pdev,
- IORESOURCE_MEM);
- if (0 == data->num_res_mem) {
- dev_err(dev,"can't find iommu memory resource \r\n");
- return -ENOMEM;
- }
- dev_dbg(dev,"data->num_res_mem=%d\n", data->num_res_mem);
+ return 0;
+}
- data->num_res_irq = rockchip_get_iommu_resource_num(pdev,
- IORESOURCE_IRQ);
- if (0 == data->num_res_irq) {
- dev_err(dev,"can't find iommu irq resource \r\n");
- return -ENOMEM;
- }
- dev_dbg(dev,"data->num_res_irq=%d\n", data->num_res_irq);
+static struct platform_driver rk_iommu_domain_driver = {
+ .probe = rk_iommu_domain_probe,
+ .driver = {
+ .name = "rk_iommu_domain",
+ },
+};
- data->res_bases = devm_kmalloc_array(dev, data->num_res_mem,
- sizeof(*data->res_bases), GFP_KERNEL);
- if (data->res_bases == NULL) {
- dev_err(dev, "Not enough memory\n");
+static int rk_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_iommu *iommu;
+ struct resource *res;
+ int num_res = pdev->num_resources;
+ int i;
+
+ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
return -ENOMEM;
- }
- for (i = 0; i < data->num_res_mem; i++) {
- struct resource *res;
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = dev;
+ iommu->num_mmu = 0;
+	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
+				    GFP_KERNEL);
+ if (!iommu->bases)
+ return -ENOMEM;
+
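+	/*
+	 * A master may be served by several MMU instances, each with its own
+	 * MEM resource; map them all and count the ones that succeed.
+	 */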
+ for (i = 0; i < num_res; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(dev,"Unable to find IOMEM region\n");
- return -ENOENT;
- }
+ if (!res)
+ continue;
+ iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->bases[i]))
+ continue;
+ iommu->num_mmu++;
+ }
+	/* Don't use PTR_ERR(bases[0]); it is 0 when no MEM resource exists */
+	if (iommu->num_mmu == 0)
+		return -ENXIO;
- data->res_bases[i] = devm_ioremap(dev,res->start,
- resource_size(res));
- if (!data->res_bases[i]) {
- dev_err(dev, "Unable to map IOMEM @ PA:%pa\n",
- &res->start);
- return -ENOMEM;
- }
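+	/* Count IRQ resources by probing indices until platform_get_irq() fails. */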
+ while (platform_get_irq(pdev, iommu->num_irq) >= 0)
+ iommu->num_irq++;
- dev_dbg(dev,"res->start = 0x%pa ioremap to data->res_bases[%d] = %p\n",
- &res->start, i, data->res_bases[i]);
+	iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
+				  GFP_KERNEL);
+ if (!iommu->irq)
+ return -ENOMEM;
- if (strstr(data->dbgname, "vop") &&
- (soc_is_rk3128() || soc_is_rk3126())) {
- rk312x_vop_mmu_base = data->res_bases[0];
- dev_dbg(dev, "rk312x_vop_mmu_base = %p\n",
- rk312x_vop_mmu_base);
+ for (i = 0; i < iommu->num_irq; i++) {
+ iommu->irq[i] = platform_get_irq(pdev, i);
+ if (iommu->irq[i] < 0) {
+			dev_err(dev, "Failed to get IRQ: %d\n", iommu->irq[i]);
+			/* Propagate the error so -EPROBE_DEFER keeps working */
+			return iommu->irq[i];
}
}
- for (i = 0; i < data->num_res_irq; i++) {
- if ((soc_is_rk3128() || soc_is_rk3126()) &&
- strstr(data->dbgname, "vop")) {
- dev_info(dev, "skip request vop mmu irq\n");
- continue;
- }
+ iommu->reset_disabled = device_property_read_bool(dev,
+ "rk_iommu,disable_reset_quirk");
- ret = platform_get_irq(pdev, i);
- if (ret <= 0) {
- dev_err(dev,"Unable to find IRQ resource\n");
- return -ENOENT;
- }
-
- ret = devm_request_irq(dev, ret, rockchip_iommu_irq,
- IRQF_SHARED, dev_name(dev), data);
- if (ret) {
- dev_err(dev, "Unabled to register interrupt handler\n");
- return -ENOENT;
- }
+	iommu->aclk = devm_clk_get(dev, "aclk");
+	if (IS_ERR(iommu->aclk)) {
+		/* Defer probing instead of treating a late clock as absent */
+		if (PTR_ERR(iommu->aclk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "can't get aclk\n");
+		iommu->aclk = NULL;
+	}
- ret = rockchip_init_iovmm(dev, &data->vmm);
- if (ret)
- return ret;
+	iommu->hclk = devm_clk_get(dev, "hclk");
+	if (IS_ERR(iommu->hclk)) {
+		/* Defer probing instead of treating a late clock as absent */
+		if (PTR_ERR(iommu->hclk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "can't get hclk\n");
+		iommu->hclk = NULL;
+	}
- data->iommu = dev;
- spin_lock_init(&data->data_lock);
- INIT_LIST_HEAD(&data->node);
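+	/*
+	 * clk_prepare() may sleep, so do it once here; later paths can then
+	 * rely on the atomic-safe clk_enable()/clk_disable() pair alone.
+	 */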
+ if (iommu->aclk && iommu->hclk) {
+ clk_prepare(iommu->aclk);
+ clk_prepare(iommu->hclk);
+ }
- dev_info(dev,"(%s) Initialized\n", data->dbgname);
+ return 0;
+}
+static int rk_iommu_remove(struct platform_device *pdev)
+{
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id iommu_dt_ids[] = {
- { .compatible = IEP_IOMMU_COMPATIBLE_NAME},
- { .compatible = VIP_IOMMU_COMPATIBLE_NAME},
- { .compatible = VOPB_IOMMU_COMPATIBLE_NAME},
- { .compatible = VOPL_IOMMU_COMPATIBLE_NAME},
- { .compatible = HEVC_IOMMU_COMPATIBLE_NAME},
- { .compatible = VPU_IOMMU_COMPATIBLE_NAME},
- { .compatible = ISP_IOMMU_COMPATIBLE_NAME},
- { .compatible = VOP_IOMMU_COMPATIBLE_NAME},
- { /* end */ }
+static const struct of_device_id rk_iommu_dt_ids[] = {
+ { .compatible = "rockchip,iommu" },
+ { /* sentinel */ }
};
-
-MODULE_DEVICE_TABLE(of, iommu_dt_ids);
-#endif
+MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
static struct platform_driver rk_iommu_driver = {
- .probe = rockchip_iommu_probe,
- .remove = NULL,
+ .probe = rk_iommu_probe,
+ .remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(iommu_dt_ids),
+ .of_match_table = rk_iommu_dt_ids,
},
};
-static int __init rockchip_iommu_init_driver(void)
+static int __init rk_iommu_init(void)
{
+ struct device_node *np;
int ret;
- lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
- LV2TABLE_SIZE, LV2TABLE_SIZE,
- 0, NULL);
- if (!lv2table_kmem_cache) {
- pr_info("%s: failed to create kmem cache\n", __func__);
- return -ENOMEM;
- }
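+	/*
+	 * Do nothing if no Rockchip IOMMU node is present, so that this
+	 * initcall does not claim the platform bus on unrelated systems.
+	 */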
+ np = of_find_matching_node(NULL, rk_iommu_dt_ids);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
if (ret)
return ret;
- return platform_driver_register(&rk_iommu_driver);
+ ret = platform_driver_register(&rk_iommu_domain_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&rk_iommu_driver);
+ if (ret)
+ platform_driver_unregister(&rk_iommu_domain_driver);
+ return ret;
}
+static void __exit rk_iommu_exit(void)
+{
+ platform_driver_unregister(&rk_iommu_driver);
+ platform_driver_unregister(&rk_iommu_domain_driver);
+}
+
+subsys_initcall(rk_iommu_init);
+module_exit(rk_iommu_exit);
-core_initcall(rockchip_iommu_init_driver);
+MODULE_DESCRIPTION("IOMMU API for Rockchip");
+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
+MODULE_ALIAS("platform:rockchip-iommu");
+MODULE_LICENSE("GPL v2");