static struct kmem_cache *lv2table_kmem_cache;
-static unsigned long *rockchip_section_entry(unsigned long *pgtable, unsigned long iova)
+/* Return a pointer to the level-1 (section/DTE) entry covering @iova in @pgtable. */
+static unsigned int *rockchip_section_entry(unsigned int *pgtable, unsigned long iova)
{
return pgtable + rockchip_lv1ent_offset(iova);
}
-static unsigned long *rockchip_page_entry(unsigned long *sent, unsigned long iova)
+/* Return a pointer to the level-2 (page/PTE) entry for @iova inside the lv2
+ * table referenced by section entry @sent.  phys_to_virt() is used instead of
+ * __va() so the expression is valid on both ARM and ARM64 builds.
+ */
+static unsigned int *rockchip_page_entry(unsigned int *sent, unsigned long iova)
{
- return (unsigned long *)__va(rockchip_lv2table_base(sent)) +
+ return (unsigned int *)phys_to_virt(rockchip_lv2table_base(sent)) +
rockchip_lv2ent_offset(iova);
}
struct rk_iommu_domain {
struct list_head clients; /* list of iommu_drvdata.node */
- unsigned long *pgtable; /* lv1 page table, 4KB */
+ unsigned int *pgtable; /* lv1 page table: 1024 x 32-bit entries, 4KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
}
if (!(mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE)) {
- pr_info("MMU stall already disabled\n");
return;
}
- skip_vop_mmu_disable:
__raw_writel(IOMMU_COMMAND_DISABLE_STALL, base + IOMMU_REGISTER_COMMAND);
+ skip_vop_mmu_disable:
+
for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
u32 status;
return false;
}
- skip_vop_mmu_enable:
__raw_writel(IOMMU_COMMAND_ENABLE_STALL, base + IOMMU_REGISTER_COMMAND);
+ skip_vop_mmu_enable:
+
for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
if (base != rk312x_vop_mmu_base) {
mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
return true;
}
}
+
if (IOMMU_REG_POLL_COUNT_FAST == i) {
pr_info("Enable paging request failed, MMU status is 0x%08X\n",
__raw_readl(base + IOMMU_REGISTER_STATUS));
return false;
}
+
return true;
}
return true;
}
}
+
if (IOMMU_REG_POLL_COUNT_FAST == i) {
pr_info("Disable paging request failed, MMU status is 0x%08X\n",
__raw_readl(base + IOMMU_REGISTER_STATUS));
return false;
}
+
return true;
}
base + IOMMU_REGISTER_COMMAND);
}
+/* Issue a ZAP_CACHE (full TLB invalidate) command without stalling the MMU
+ * first.  Used from the page-fault path where a separate stall is not wanted
+ * (see the interrupt handler, which calls this instead of
+ * rockchip_iommu_zap_tlb()).  Always returns 0.
+ */
+static int rockchip_iommu_zap_tlb_without_stall (void __iomem *base)
+{
+ __raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);
+
+ return 0;
+}
+
static int rockchip_iommu_zap_tlb(void __iomem *base)
{
if (!rockchip_iommu_enable_stall(base)) {
{
int i;
unsigned int ret;
+ unsigned int grf_value;
__raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);
if (base != rk312x_vop_mmu_base) {
ret = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
if (!(0xCAFEB000 == ret)) {
- pr_info("error when %s.\n", __func__);
+ grf_value = readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
+ pr_info("error when %s. grf = 0x%08x\n", __func__, grf_value);
return false;
}
}
return true;
}
-static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned long pgd)
+/* Program the physical address of the lv1 page table into the MMU's
+ * DTE base-address register (@pgd is a 32-bit physical address).
+ */
+static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned int pgd)
{
__raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);
}
{
bool ret = true;
- ret = rockchip_iommu_enable_stall(base);
- if (!ret) {
- pr_info("%s:stall failed: %s\n", __func__, dbgname);
- return ret;
- }
-
ret = rockchip_iommu_raw_reset(base);
if (!ret) {
pr_info("(%s), %s failed\n", dbgname, __func__);
else
__raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);
- rockchip_iommu_disable_stall(base);
-
return ret;
}
+/* Flush CPU caches for the page-table memory in [vastart, vaend) so the
+ * IOMMU reads up-to-date entries: on ARM, clean/invalidate the inner cache
+ * and the outer (L2) cache; on ARM64, __dma_flush_range() covers the whole
+ * cache hierarchy.
+ */
static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
{
+#ifdef CONFIG_ARM
dmac_flush_range(vastart, vaend);
outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
+#elif defined(CONFIG_ARM64)
+	__dma_flush_range(vastart, vaend);
+#endif
}
static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
if (data->domain)
report_iommu_fault(data->domain, data->iommu,
fault_address, flags);
+ if (data->fault_handler)
+ data->fault_handler(data->iommu, IOMMU_PAGEFAULT, dte, fault_address, 1);
rockchip_iommu_page_fault_done(data->res_bases[i],
data->dbgname);
dev_info(data->iommu, "2.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
rawstat, status, reg_status);
- ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
+ ret = rockchip_iommu_zap_tlb_without_stall(data->res_bases[i]);
if (ret)
dev_err(data->iommu, "(%s) %s failed\n", data->dbgname,
__func__);
}
for (i = 0; i < data->num_res_mem; i++) {
+ ret = rockchip_iommu_enable_stall(data->res_bases[i]);
+ if (!ret) {
+ dev_info(data->iommu, "(%s), %s failed\n",
+ data->dbgname, __func__);
+ spin_unlock_irqrestore(&data->data_lock, flags);
+ return false;
+ }
+
__raw_writel(0, data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
+
ret = rockchip_iommu_disable_paging(data->res_bases[i]);
if (!ret) {
+ rockchip_iommu_disable_stall(data->res_bases[i]);
spin_unlock_irqrestore(&data->data_lock, flags);
dev_info(data->iommu, "%s error\n", __func__);
return ret;
}
+ rockchip_iommu_disable_stall(data->res_bases[i]);
}
data->pgtable = 0;
spin_unlock_irqrestore(&data->data_lock, flags);
- dev_info(data->iommu,"(%s) Disabled\n", data->dbgname);
+ dev_dbg(data->iommu,"(%s) Disabled\n", data->dbgname);
return ret;
}
* 0 if the System MMU has been just enabled and 1 if System MMU was already
* enabled before.
*/
-static int rockchip_iommu_enable(struct iommu_drvdata *data,
- unsigned long pgtable,
- struct iommu_domain *domain)
+static int rockchip_iommu_enable(struct iommu_drvdata *data, unsigned int pgtable)
{
int i, ret = 0;
unsigned long flags;
spin_unlock_irqrestore(&data->data_lock, flags);
dev_info(data->iommu, "(%s) Already enabled\n", data->dbgname);
-
- return ret;
- }
- data->pgtable = pgtable;
+ return ret;
+ }
for (i = 0; i < data->num_res_mem; i++) {
ret = rockchip_iommu_enable_stall(data->res_bases[i]);
return -EBUSY;
}
+ if (!strstr(data->dbgname, "isp")) {
+ if (!rockchip_iommu_reset(data->res_bases[i],
+ data->dbgname)) {
+ spin_unlock_irqrestore(&data->data_lock, flags);
+ return -ENOENT;
+ }
+ }
+
rockchip_iommu_set_ptbase(data->res_bases[i], pgtable);
+
__raw_writel(IOMMU_COMMAND_ZAP_CACHE, data->res_bases[i] +
IOMMU_REGISTER_COMMAND);
- __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
- IOMMU_INTERRUPT_READ_BUS_ERROR,
+
+ if (strstr(data->dbgname, "isp")) {
+ __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
+ IOMMU_INTERRUPT_READ_BUS_ERROR,
data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
+ }
+
ret = rockchip_iommu_enable_paging(data->res_bases[i]);
if (!ret) {
spin_unlock_irqrestore(&data->data_lock, flags);
data->dbgname, __func__);
return -EBUSY;
}
+
rockchip_iommu_disable_stall(data->res_bases[i]);
}
- dev_info(data->iommu,"(%s) Enabled\n", data->dbgname);
+ data->pgtable = pgtable;
+
+ dev_dbg(data->iommu,"(%s) Enabled\n", data->dbgname);
spin_unlock_irqrestore(&data->data_lock, flags);
return 0;
}
+/* Invalidate the whole TLB of every MMU instance belonging to @dev.
+ *
+ * Returns 0 on success (including the no-op case of zero memory resources),
+ * -1 if the MMU is currently disabled, or the non-zero result of a failed
+ * zap command.
+ */
+int rockchip_iommu_tlb_invalidate_global(struct device *dev)
+{
+	unsigned long flags;
+	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	int ret = 0;	/* must be initialized: the loop below may run zero times */
+
+	spin_lock_irqsave(&data->data_lock, flags);
+
+	if (rockchip_is_iommu_active(data)) {
+		int i;
+
+		for (i = 0; i < data->num_res_mem; i++) {
+			ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
+			if (ret)
+				dev_err(dev->archdata.iommu, "(%s) %s failed\n",
+					data->dbgname, __func__);
+		}
+	} else {
+		dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",
+			data->dbgname);
+		ret = -1;
+	}
+
+	spin_unlock_irqrestore(&data->data_lock, flags);
+
+	return ret;
+}
+
int rockchip_iommu_tlb_invalidate(struct device *dev)
{
unsigned long flags;
struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ if (strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc"))
+ return 0;
+
spin_lock_irqsave(&data->data_lock, flags);
if (rockchip_is_iommu_active(data)) {
dma_addr_t iova)
{
struct rk_iommu_domain *priv = domain->priv;
- unsigned long *entry;
+ unsigned int *entry;
unsigned long flags;
phys_addr_t phys = 0;
return phys;
}
-static int rockchip_lv2set_page(unsigned long *pent, phys_addr_t paddr,
+static int rockchip_lv2set_page(unsigned int *pent, phys_addr_t paddr,
size_t size, short *pgcnt)
{
if (!rockchip_lv2ent_fault(pent))
return 0;
}
-static unsigned long *rockchip_alloc_lv2entry(unsigned long *sent,
+static unsigned int *rockchip_alloc_lv2entry(unsigned int *sent,
unsigned long iova, short *pgcounter)
{
if (rockchip_lv1ent_fault(sent)) {
- unsigned long *pent;
+ unsigned int *pent;
pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
if (!pent)
return NULL;
- *sent = rockchip_mk_lv1ent_page(__pa(pent));
+ *sent = rockchip_mk_lv1ent_page(virt_to_phys(pent));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
rockchip_pgtable_flush(pent, pent + NUM_LV2ENTRIES);
{
struct rk_iommu_domain *priv = domain->priv;
unsigned long flags;
- unsigned long *ent;
+ unsigned int *ent;
BUG_ON(priv->pgtable == NULL);
goto done;
done:
- #if 0
- pr_info("%s:unmap iova 0x%lx/0x%x bytes\n",
+ pr_debug("%s:unmap iova 0x%lx/%zx bytes\n",
__func__, iova,size);
- #endif
spin_unlock_irqrestore(&priv->pgtablelock, flags);
return size;
phys_addr_t paddr, size_t size, int prot)
{
struct rk_iommu_domain *priv = domain->priv;
- unsigned long *entry;
+ unsigned int *entry;
unsigned long flags;
int ret = -ENOMEM;
- unsigned long *pent;
+ unsigned int *pent;
BUG_ON(priv->pgtable == NULL);
&priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);
if (ret) {
- pr_info("%s: Failed to map iova 0x%lx/0x%x bytes\n", __func__,
+ pr_info("%s: Failed to map iova 0x%lx/%zx bytes\n", __func__,
iova, size);
}
spin_unlock_irqrestore(&priv->pgtablelock, flags);
return ret;
}
-static void rockchip_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static void rockchip_iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
struct rk_iommu_domain *priv = domain->priv;
}
if (rockchip_iommu_disable(data)) {
- dev_dbg(dev->archdata.iommu,"%s: Detached IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
+ if (!(strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc")))
+ dev_dbg(dev->archdata.iommu,"%s: Detached IOMMU with pgtable %08lx\n",
+ __func__, (unsigned long)virt_to_phys(priv->pgtable));
data->domain = NULL;
list_del_init(&data->node);
} else
- dev_err(dev->archdata.iommu,"%s: Detaching IOMMU with pgtable %#lx delayed",
- __func__, __pa(priv->pgtable));
+ dev_err(dev->archdata.iommu,"%s: Detaching IOMMU with pgtable %08lx delayed",
+ __func__, (unsigned long)virt_to_phys(priv->pgtable));
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int rockchip_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+static int rockchip_iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
struct rk_iommu_domain *priv = domain->priv;
spin_lock_irqsave(&priv->lock, flags);
- ret = rockchip_iommu_enable(data, __pa(priv->pgtable), domain);
+ ret = rockchip_iommu_enable(data, virt_to_phys(priv->pgtable));
if (ret == 0) {
/* 'data->node' must not be appeared in priv->clients */
spin_unlock_irqrestore(&priv->lock, flags);
if (ret < 0) {
- dev_err(dev->archdata.iommu,"%s: Failed to attach IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
+ dev_err(dev->archdata.iommu,"%s: Failed to attach IOMMU with pgtable %x\n",
+ __func__, (unsigned int)virt_to_phys(priv->pgtable));
} else if (ret > 0) {
- dev_dbg(dev->archdata.iommu,"%s: IOMMU with pgtable 0x%lx already attached\n",
- __func__, __pa(priv->pgtable));
+ dev_dbg(dev->archdata.iommu,"%s: IOMMU with pgtable 0x%x already attached\n",
+ __func__, (unsigned int)virt_to_phys(priv->pgtable));
} else {
- dev_dbg(dev->archdata.iommu,"%s: Attached new IOMMU with pgtable 0x%lx\n",
- __func__, __pa(priv->pgtable));
+ if (!(strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc")))
+ dev_info(dev->archdata.iommu,"%s: Attached new IOMMU with pgtable 0x%x\n",
+ __func__, (unsigned int)virt_to_phys(priv->pgtable));
}
return ret;
for (i = 0; i < NUM_LV1ENTRIES; i++)
if (rockchip_lv1ent_page(priv->pgtable + i))
kmem_cache_free(lv2table_kmem_cache,
- __va(rockchip_lv2table_base(priv->pgtable + i)));
+ phys_to_virt(rockchip_lv2table_base(priv->pgtable + i)));
free_pages((unsigned long)priv->pgtable, 0);
free_pages((unsigned long)priv->lv2entcnt, 0);
level1 and leve2 both have 1024 entries,each entry occupy 4 bytes,
so alloc a page size for each page table
*/
- priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL |
+ priv->pgtable = (unsigned int *)__get_free_pages(GFP_KERNEL |
__GFP_ZERO, 0);
if (!priv->pgtable)
goto err_pgtable;
}
+/* Callbacks registered with the IOMMU core; a function name already decays
+ * to a pointer, so the explicit '&' is redundant and dropped here.
+ */
static struct iommu_ops rk_iommu_ops = {
- .domain_init = &rockchip_iommu_domain_init,
- .domain_destroy = &rockchip_iommu_domain_destroy,
- .attach_dev = &rockchip_iommu_attach_device,
- .detach_dev = &rockchip_iommu_detach_device,
- .map = &rockchip_iommu_map,
- .unmap = &rockchip_iommu_unmap,
- .iova_to_phys = &rockchip_iommu_iova_to_phys,
+ .domain_init = rockchip_iommu_domain_init,
+ .domain_destroy = rockchip_iommu_domain_destroy,
+ .attach_dev = rockchip_iommu_attach_device,
+ .detach_dev = rockchip_iommu_detach_device,
+ .map = rockchip_iommu_map,
+ .unmap = rockchip_iommu_unmap,
+ .iova_to_phys = rockchip_iommu_iova_to_phys,
.pgsize_bitmap = SPAGE_SIZE,
};
{
int num = 0;
int i;
-#if 0
- pr_info("dev num_resources %d type = 0x%08x\n",pdev->num_resources, type);
-#endif
+
for (i = 0; i < pdev->num_resources; i++) {
struct resource *r = &pdev->resource[i];
-#if 0
-dev_info(&pdev->dev, "r[%d] start %08x end %08x flags %08lx name (%s) resource_type %08lx\n", i, r->start, r->end, r->flags, r->name, resource_type(r));
-#endif
if (type == resource_type(r))
num++;
}
struct iommu_drvdata *data;
dev = &pdev->dev;
-
-#if 0
-struct resource *res = pdev->resource;
-
-for (i = 0; i < pdev->num_resources; i++, res++) {
- pr_info("r[%d] start %08x end %08x flags %08lx name (%s) resource_type %08lx\n", i, res->start, res->end, res->flags, res->name, resource_type(res));
-}
-#endif
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data) {
data->res_bases[i] = devm_ioremap(dev,res->start,
resource_size(res));
if (!data->res_bases[i]) {
- dev_err(dev, "Unable to map IOMEM @ PA:%#x\n",
- res->start);
+ dev_err(dev, "Unable to map IOMEM @ PA:%pa\n",
+ &res->start);
return -ENOMEM;
}
- dev_dbg(dev,"res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
- res->start, i, (unsigned int)data->res_bases[i]);
+ dev_dbg(dev,"res->start = 0x%pa ioremap to data->res_bases[%d] = %p\n",
+ &res->start, i, data->res_bases[i]);
- if (strstr(data->dbgname, "vop") && cpu_is_rk312x()) {
+ if (strstr(data->dbgname, "vop") &&
+ (soc_is_rk3128() || soc_is_rk3126())) {
rk312x_vop_mmu_base = data->res_bases[0];
- dev_dbg(dev, "rk312x_vop_mmu_base = 0x%08x\n",
- (unsigned int)rk312x_vop_mmu_base);
+ dev_dbg(dev, "rk312x_vop_mmu_base = %p\n",
+ rk312x_vop_mmu_base);
}
-
- if (!strstr(data->dbgname, "isp"))
- if (!rockchip_iommu_reset(data->res_bases[i],
- data->dbgname))
- return -ENOENT;
}
for (i = 0; i < data->num_res_irq; i++) {
- if (cpu_is_rk312x() && strstr(data->dbgname, "vop")) {
+ if ((soc_is_rk3128() || soc_is_rk3126()) &&
+ strstr(data->dbgname, "vop")) {
dev_info(dev, "skip request vop mmu irq\n");
continue;
}