Merge tag 'for_linux-3.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jwess...
[firefly-linux-kernel-4.4.55.git] / drivers/iommu/iova.c
index f6b17e6af2fb261f84eafd974d652a76716dcb95..9dd8208312c2e75874ce70dcc6008a166eb5ffca 100644
  */
 
 #include <linux/iova.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *iommu_iova_cache;
+
+int iommu_iova_cache_init(void)
+{
+       int ret = 0;
+
+       iommu_iova_cache = kmem_cache_create("iommu_iova",
+                                        sizeof(struct iova),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL);
+       if (!iommu_iova_cache) {
+               pr_err("Couldn't create iova cache\n");
+               ret = -ENOMEM;
+       }
+
+       return ret;
+}
+
+void iommu_iova_cache_destroy(void)
+{
+       kmem_cache_destroy(iommu_iova_cache);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+       kmem_cache_free(iommu_iova_cache, iova);
+}
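
These helpers wrap a dedicated slab cache for struct iova: iommu_iova_cache_init() must have succeeded before any alloc_iova_mem() call, and iommu_iova_cache_destroy() is only safe once every iova has been returned via free_iova_mem(). A minimal sketch of a hypothetical caller follows (the function names are illustrative, not part of this patch):

	/* Illustrative driver init/exit pairing for the iova slab cache. */
	static int __init example_iommu_init(void)
	{
		int ret = iommu_iova_cache_init();

		if (ret)
			return ret;	/* -ENOMEM: slab could not be created */

		/* ... register the IOMMU driver, set up domains ... */
		return 0;
	}

	static void __exit example_iommu_exit(void)
	{
		/* All iovas must already have been freed with free_iova_mem(). */
		iommu_iova_cache_destroy();
	}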
 
 void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+       unsigned long start_pfn, unsigned long pfn_32bit)
 {
+       /*
+        * IOVA granularity will normally be equal to the smallest
+        * supported IOMMU page size; both *must* be capable of
+        * representing individual CPU pages exactly.
+        */
+       BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
+       iovad->granule = granule;
+       iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = pfn_32bit;
 }
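
With this change a caller supplies the IOVA granule and the first allocatable PFN explicitly instead of relying on a compile-time IOVA_START_PFN. A hedged example of the new call (the 4 KiB granule, start PFN of 1 and 32-bit limit below are illustrative assumptions, not values taken from this patch):

	struct iova_domain iovad;

	/* 4 KiB granule, allocations start at PFN 1, and the 32-bit
	 * ceiling sits at the 4 GiB boundary (in 4 KiB frames). */
	init_iova_domain(&iovad, SZ_4K, 1,
			 DMA_BIT_MASK(32) >> 12);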
 
@@ -127,7 +172,7 @@ move_left:
        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
-               if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+               if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @size: - number of page frames to allocate
  * @limit_pfn: - max limit address
  * @size_aligned: - set if a size-aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
  * flag is set then the allocated address iova->pfn_lo will be naturally
  * aligned on roundup_power_of_two(size).
  */
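
A brief sketch of those semantics (the size, limit and error handling are illustrative assumptions):

	/* Request 12 page frames below the 32-bit boundary, size-aligned:
	 * the search runs top-down from limit_pfn, and because size_aligned
	 * is true the returned pfn_lo is aligned to
	 * roundup_power_of_two(12) == 16 frames. */
	struct iova *new_iova = alloc_iova(&iovad, 12,
					   DMA_BIT_MASK(32) >> 12, true);
	if (!new_iova)
		return -ENOMEM;	/* no space left in [start_pfn, limit_pfn] */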