Merge remote-tracking branches 'regulator/topic/supply', 'regulator/topic/tps6105x...
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4cd0c29cb585000c0e5899651948ad1dc2ffbf1f..48a39dfa977795deb8271dc5b34a7b3e0be002d0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -37,6 +37,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
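(The added of_address.h include is what provides of_dma_is_coherent(), used by the probe-time coherency logic further down.)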
@@ -607,34 +608,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        }
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-       struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
-       /* Ensure new page tables are visible to the hardware walker */
-       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-               dsb(ishst);
-       } else {
-               /*
-                * If the SMMU can't walk tables in the CPU caches, treat them
-                * like non-coherent DMA since we need to flush the new entries
-                * all the way out to memory. There's no possibility of
-                * recursion here as the SMMU table walker will not be wired
-                * through another SMMU.
-                */
-               dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-                            DMA_TO_DEVICE);
-       }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
-       .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
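With the flush_pgtable callback gone from the gather ops, making newly
written page-table entries visible to a non-coherent walker becomes the
io-pgtable library's job, driven through the regular streaming DMA API
against the device plumbed in by the next hunk. A minimal sketch of such a
helper, assuming the tables were mapped at allocation time and their
dma_addr_t kept around; the name and exact shape are illustrative, not the
mainline io-pgtable code:

#include <linux/dma-mapping.h>

static void example_sync_ptes(struct io_pgtable_cfg *cfg,
                              dma_addr_t pte_dma, size_t size)
{
        /*
         * Clean the updated PTEs out of the CPU caches; for a walker
         * described as dma-coherent this reduces to the needed barrier.
         */
        dma_sync_single_for_device(cfg->iommu_dev, pte_dma, size,
                                   DMA_TO_DEVICE);
}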
@@ -898,6 +875,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
+               .iommu_dev      = smmu->dev,
        };
 
        smmu_domain->smmu = smmu;
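The new iommu_dev field gives the page-table code a struct device to make
those DMA API calls against. On the allocation side, freshly zeroed table
memory can then be handed to the walker like any other non-coherent DMA
buffer. Another illustrative sketch under the same assumptions (not the
exact mainline code):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_table(struct io_pgtable_cfg *cfg, size_t size,
                                 gfp_t gfp)
{
        struct device *dev = cfg->iommu_dev;
        void *table = alloc_pages_exact(size, gfp | __GFP_ZERO);
        dma_addr_t dma;

        if (!table)
                return NULL;

        /* Map (and hence clean) the zeroed table for the walker */
        dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                free_pages_exact(table, size);
                return NULL;
        }
        return table;
}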
@@ -1532,6 +1510,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        unsigned long size;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 id;
+       bool cttw_dt, cttw_reg;
 
        dev_notice(smmu->dev, "probing hardware configuration...\n");
        dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
@@ -1571,10 +1550,22 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
 
-       if (id & ID0_CTTW) {
+       /*
+        * In order for DMA API calls to work properly, we must defer to what
+        * the DT says about coherency, regardless of what the hardware claims.
+        * Fortunately, this also opens up a workaround for systems where the
+        * ID register value has ended up configured incorrectly.
+        */
+       cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+       cttw_reg = !!(id & ID0_CTTW);
+       if (cttw_dt)
                smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-               dev_notice(smmu->dev, "\tcoherent table walk\n");
-       }
+       if (cttw_dt || cttw_reg)
+               dev_notice(smmu->dev, "\t%scoherent table walk\n",
+                          cttw_dt ? "" : "non-");
+       if (cttw_dt != cttw_reg)
+               dev_notice(smmu->dev,
+                          "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
        if (id & ID0_SMS) {
                u32 smr, sid, mask;
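The net effect of the cttw_dt/cttw_reg logic above is a simple precedence
rule: the DT "dma-coherent" property always wins, and a disagreement with
the IDR0.CTTW ID register bit is merely reported. A standalone userspace C
illustration (not kernel code) of the resulting truth table:

#include <stdbool.h>
#include <stdio.h>

static void cttw_decide(bool cttw_dt, bool cttw_reg)
{
        bool coherent_walk = cttw_dt;   /* the DT always takes precedence */

        printf("dt=%d reg=%d:", cttw_dt, cttw_reg);
        if (cttw_dt || cttw_reg)
                printf(" %scoherent table walk", cttw_dt ? "" : "non-");
        if (cttw_dt != cttw_reg)
                printf(" (IDR0.CTTW overridden by dma-coherent property)");
        printf(" -> COHERENT_WALK=%d\n", coherent_walk);
}

int main(void)
{
        /* Exercise all four combinations of DT property vs. ID register */
        for (int dt = 0; dt < 2; dt++)
                for (int reg = 0; reg < 2; reg++)
                        cttw_decide(dt, reg);
        return 0;
}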