#include "rockchip-iommu.h"
+#define IOMMU_REGION_GUARD (2<<PAGE_SHIFT)
+
/*
 * find_region() - look up the rk_vm_region that covers @iova in @vmm.
 *
 * NOTE(review): the body in this excerpt is elided — @region is
 * declared but never used and the function unconditionally returns
 * NULL.  Presumably the full version walks vmm->regions_list (under
 * vmm->lock); confirm against the complete source before relying on
 * this stub.
 */
static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova)
{
struct rk_vm_region *region; /* unused in this excerpt — see note above */
return NULL;
}
/*
 * rockchip_iovmm_invalidate_tlb() - flush the IOMMU TLB for @dev.
 *
 * Thin wrapper around rockchip_iommu_tlb_invalidate_global().
 *
 * Return: 0 on success, negative errno from the underlying driver call.
 */
int rockchip_iovmm_invalidate_tlb(struct device *dev)
{
	/* Nothing to add around the call; return its result directly. */
	return rockchip_iommu_tlb_invalidate_global(dev);
}
+
+void rockchip_iovmm_set_fault_handler(struct device *dev,
+ rockchip_iommu_fault_handler_t handler)
+{
+ struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ data->fault_handler = handler;
+}
+
+int rockchip_iovmm_activate(struct device *dev)
{
struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
return iommu_attach_device(vmm->domain, dev);
}
-void iovmm_deactivate(struct device *dev)
+void rockchip_iovmm_deactivate(struct device *dev)
{
struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
iommu_detach_device(vmm->domain, dev);
}
/*
 * rockchip_iovmm_map() - allocate an IOVA range from the vmm gen_pool
 * (plus IOMMU_REGION_GUARD guard space) and map scatterlist @sg into
 * it, starting @offset bytes in, for @size bytes.  Returns the region
 * start IOVA on success or a negative errno cast to dma_addr_t.
 *
 * NOTE(review): this hunk still carries unresolved unified-diff
 * markers ('-' removed / '+' added lines) and is heavily elided:
 * 'order', 'region', 'ret', 'len', 'mapped_size' and 'vmm' are used
 * but never declared in the visible lines, spin_unlock() has no
 * matching spin_lock(), the do { ... } loop is never closed, and the
 * 'err_map_map' label is referenced but not defined here.  Resolve
 * the patch against the complete file; do not compile this excerpt
 * as-is.
 */
-dma_addr_t iovmm_map(struct device *dev,struct scatterlist *sg, off_t offset,size_t size)
+dma_addr_t rockchip_iovmm_map(struct device *dev,
+	struct scatterlist *sg, off_t offset, size_t size)
{
off_t start_off;
dma_addr_t addr, start = 0;
order = __fls(min_t(size_t, size, SZ_1M));
region = kmalloc(sizeof(*region), GFP_KERNEL);
-	if (!region)
-	{
+	if (!region) {
ret = -ENOMEM;
goto err_map_nomem;
}
/* Allocate the IOVA window; the guard pages pad past the mapping. */
-	//start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, size, order);
-
-	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
-	if (!start)
-	{
+	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool,
+				size+IOMMU_REGION_GUARD);
+	if (!start) {
ret = -ENOMEM;
goto err_map_noiomem;
}
+	pr_debug("%s: size = %zx\n", __func__, size);
+
addr = start;
do {
phys_addr_t phys;
len = sg_dma_len(sg);
/* if back to back sg entries are contiguous consolidate them */
-		while (sg_next(sg) &&sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg)))
-		{
+		while (sg_next(sg) && sg_phys(sg) +
+		       sg_dma_len(sg) == sg_phys(sg_next(sg))) {
len += sg_dma_len(sg_next(sg));
sg = sg_next(sg);
}
/* Skip @offset bytes into the first chunk only. */
-		if (offset > 0)
-		{
+		if (offset > 0) {
len -= offset;
phys += offset;
offset = 0;
}
/* IOMMU mappings must be page-aligned; widen to page boundary. */
-		if (offset_in_page(phys))
-		{
+		if (offset_in_page(phys)) {
len += offset_in_page(phys);
phys = round_down(phys, PAGE_SIZE);
}
if (len > (size - mapped_size))
len = size - mapped_size;
-
+		pr_debug("addr = %pad, phys = %pa, len = %zx\n", &addr, &phys, len);
ret = iommu_map(vmm->domain, addr, phys, len, 0);
if (ret)
break;
spin_unlock(&vmm->lock);
/* Patch replaces the old global-TLB flush with a checked variant. */
-	rockchip_sysmmu_tlb_invalidate(dev);
-
-	pr_err("IOVMM: Allocated VM region @ %#x/%#X bytes.\n",region->start, region->size);
+	ret = rockchip_iommu_tlb_invalidate(dev);
+	if (ret) {
+		spin_lock(&vmm->lock);
/* NOTE(review): "®ion" below is mojibake for "&region". */
+		list_del(®ion->node);
+		spin_unlock(&vmm->lock);
+		goto err_map_map;
+	}
+	dev_dbg(dev->archdata.iommu, "IOVMM: Allocated VM region @ %p/%#X bytes.\n",
+		®ion->start, region->size);
return region->start;
err_map_noiomem:
kfree(region);
err_map_nomem:
-	pr_err("IOVMM: Failed to allocated VM region for %#x bytes.\n",size);
+	dev_err(dev->archdata.iommu, "IOVMM: Failed to allocated VM region for %zx bytes.\n", size);
return (dma_addr_t)ret;
}
/*
 * rockchip_iovmm_unmap() - tear down the mapping previously created at
 * @iova: unmap it from the IOMMU domain, return the IOVA range (plus
 * guard space) to the gen_pool, and free the region descriptor.
 *
 * NOTE(review): excerpt still carries diff markers and is elided —
 * 'unmapped_size' is never declared, the region is never removed from
 * vmm->regions_list, and vmm->lock taken above is not released on the
 * success path in the visible lines.  Confirm against the full source.
 */
-void iovmm_unmap(struct device *dev, dma_addr_t iova)
+void rockchip_iovmm_unmap(struct device *dev, dma_addr_t iova)
{
struct rk_vm_region *region;
struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
spin_lock(&vmm->lock);
region = find_region(vmm, iova);
-	if (WARN_ON(!region))
-	{
+	if (WARN_ON(!region)) {
spin_unlock(&vmm->lock);
return;
}
region->start = round_down(region->start, PAGE_SIZE);
-	unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
-
-	rockchip_sysmmu_tlb_invalidate(dev);
-
-	gen_pool_free(vmm->vmm_pool, region->start, region->size);
+	unmapped_size = iommu_unmap(vmm->domain,
+		region->start, region->size);
/* Patch deliberately comments out the TLB flush here — verify why. */
+	/*
+	rockchip_iommu_tlb_invalidate(dev);
+	*/
+	gen_pool_free(vmm->vmm_pool, region->start,
+		region->size+IOMMU_REGION_GUARD);
WARN_ON(unmapped_size != region->size);
-	pr_err("IOVMM: Unmapped %#x bytes from %#x.\n",unmapped_size, region->start);
-
+
/* NOTE(review): "®ion" below is mojibake for "&region". */
+	dev_dbg(dev->archdata.iommu, "IOVMM: Unmapped %zx bytes from %pad.\n",
+		unmapped_size, ®ion->start);
+
kfree(region);
}
/*
 * rockchip_iovmm_map_oto() - create a one-to-one (identity) mapping of
 * [@phys, @phys + @size) in the device's IOMMU domain.  Ranges that
 * would overlap the IOVA allocator window (>= IOVA_START) are refused
 * with -EINVAL.
 *
 * NOTE(review): elided excerpt with diff markers — 'region' is freed
 * on error and linked into regions_list without ever being allocated
 * or initialised in the visible lines, and spin_unlock() has no
 * matching spin_lock() here.  "®ion" is mojibake for "&region".
 */
-int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
+int rockchip_iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
struct rk_vm_region *region;
struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
int ret;
-	if (WARN_ON((phys + size) >= IOVA_START))
-	{
-		dev_err(dev,"Unable to create one to one mapping for %#x @ %#x\n",size, phys);
+	if (WARN_ON((phys + size) >= IOVA_START)) {
+		dev_err(dev->archdata.iommu, "Unable to create one to one mapping for %zx @ %pa\n",
+			size, &phys);
return -EINVAL;
}
ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
-	if (ret < 0)
-	{
+	if (ret < 0) {
kfree(region);
return ret;
}
list_add(®ion->node, &vmm->regions_list);
spin_unlock(&vmm->lock);
/* Patch replaces the fire-and-forget flush with a checked variant. */
-
-	rockchip_sysmmu_tlb_invalidate(dev);
+
+	ret = rockchip_iommu_tlb_invalidate(dev);
+	if (ret)
+		return ret;
return 0;
}
/*
 * rockchip_iovmm_unmap_oto() - remove a one-to-one mapping previously
 * created by rockchip_iovmm_map_oto() for @phys and free its region
 * descriptor.
 *
 * NOTE(review): 'unmapped_size' is never declared in this excerpt and
 * the region is not removed from vmm->regions_list before kfree() —
 * presumably elided.  "®ion" is mojibake for "&region".
 */
-void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
+void rockchip_iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
struct rk_vm_region *region;
struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
spin_lock(&vmm->lock);
region = find_region(vmm, (dma_addr_t)phys);
-	if (WARN_ON(!region))
-	{
+	if (WARN_ON(!region)) {
spin_unlock(&vmm->lock);
return;
}
spin_unlock(&vmm->lock);
unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
/* Patch drops the explicit TLB flush here — verify that is intended. */
-	rockchip_sysmmu_tlb_invalidate(dev);
WARN_ON(unmapped_size != region->size);
-	dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",unmapped_size, region->start);
+	dev_dbg(dev->archdata.iommu, "IOVMM: Unmapped %zx bytes from %pad.\n",
+		unmapped_size, ®ion->start);
kfree(region);
}
/*
 * rockchip_init_iovmm() - initialise @vmm: create the IOVA gen_pool,
 * allocate the IOMMU domain, and set up the lock and region list.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): the unconditional 'goto err_setup_domain;' below makes
 * everything after it unreachable and exits through the error path
 * with ret == 0 — almost certainly an elided sequence such as
 * 'ret = gen_pool_add(...); if (ret) goto err_setup_domain;'.
 * Confirm against the complete source before merging.
 */
-int rockchip_init_iovmm(struct device *sysmmu, struct rk_iovmm *vmm)
+int rockchip_init_iovmm(struct device *iommu, struct rk_iovmm *vmm)
{
int ret = 0;
vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
-	if (!vmm->vmm_pool)
-	{
+	if (!vmm->vmm_pool) {
ret = -ENOMEM;
goto err_setup_genalloc;
}
goto err_setup_domain; /* NOTE(review): unconditional — see header note */
vmm->domain = iommu_domain_alloc(&platform_bus_type);
-	if (!vmm->domain)
-	{
+	if (!vmm->domain) {
ret = -ENOMEM;
goto err_setup_domain;
}
spin_lock_init(&vmm->lock);
INIT_LIST_HEAD(&vmm->regions_list);
-
-	pr_info("IOVMM: Created %#x B IOVMM from %#x.\n",IOVM_SIZE, IOVA_START);
+
+	dev_info(iommu, "IOVMM: Created %#x B IOVMM from %#x.\n",
+		IOVM_SIZE, IOVA_START);
return 0;
err_setup_domain:
gen_pool_destroy(vmm->vmm_pool);
err_setup_genalloc:
-	dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);
+	dev_err(iommu, "IOVMM: Failed to create IOVMM (%d)\n", ret);
return ret;
}
-
/*
 * NOTE(review): the entire helper below is deleted by this patch
 * (every line is '-'-prefixed).  It resolved a DT compatible string to
 * the matching platform device's struct device via
 * of_find_compatible_node()/of_find_device_by_node(), returning NULL
 * on failure.  Verify all callers have been migrated to another lookup
 * mechanism before the removal lands.
 */
-/****
-1,success : pointer to the device inside of platform device
-2,fail : NULL
-****/
-struct device *rockchip_get_sysmmu_device_by_compatible(const char *compt)
-{
-	struct device_node *dn = NULL;
-	struct platform_device *pd = NULL;
-	struct device *ret = NULL ;
-
-#if 0
-	dn = of_find_node_by_name(NULL,name);
-#endif
-
-	dn = of_find_compatible_node(NULL,NULL,compt);
-	if(!dn)
-	{
-		printk("can't find device node %s \r\n",compt);
-		return NULL;
-	}
-
-	pd = of_find_device_by_node(dn);
-	if(!pd)
-	{
-		printk("can't find platform device in device node %s \r\n",compt);
-		return NULL;
-	}
-	ret = &pd->dev;
-
-	return ret;
-
-}
-