firefly-linux-kernel-4.4.55.git / drivers/iommu/intel-svm.c

diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index c69e3f9ec9586bca0cd2ef712a3cf9fc25f4689f..d9939fa9b58887567d1fe8a7a94c7e5f1452cdf0 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+       struct intel_svm_dev *sdev;
 
+       /* This might end up being called from exit_mmap(), *before* the page
+        * tables are cleared. And __mmu_notifier_release() will delete us from
+        * the list of notifiers so that our invalidate_range() callback doesn't
+        * get called when the page tables are cleared. So we need to protect
+        * against hardware accessing those page tables.
+        *
+        * We do it by clearing the entry in the PASID table and then flushing
+        * the IOTLB and the PASID table caches. This might upset hardware;
+        * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+        * page) so that we end up taking a fault that the hardware really
+        * *has* to handle gracefully without affecting other processes.
+        */
        svm->iommu->pasid_table[svm->pasid].val = 0;
+       wmb();
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sdev, &svm->devs, list) {
+               intel_flush_pasid_dev(svm, sdev, svm->pasid);
+               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+       }
+       rcu_read_unlock();
 
-       /* There's no need to do any flush because we can't get here if there
-        * are any devices left anyway. */
-       WARN_ON(!list_empty(&svm->devs));
 }
 
 static const struct mmu_notifier_ops intel_mmuops = {
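
The comment added above spells out the ordering this release path relies on: revoke the PASID-table entry first, make that visible with wmb(), and only then flush each device's IOTLB and PASID caches, walking the device list under rcu_read_lock() because entries may be unlinked concurrently. The user-space sketch below illustrates only that "revoke, then flush" ordering; the variable names are invented and the C11 fence merely stands in for wmb(), so this is an analogy, not driver code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for one hardware-visible PASID-table entry (illustration only). */
static _Atomic uint64_t pasid_entry = 0x1234;

/* Stand-in for "flush the IOTLB and PASID caches of one device". */
static void flush_device_caches(int dev)
{
        printf("device %d: caches flushed after entry was revoked\n", dev);
}

int main(void)
{
        /* 1. Revoke the translation first ... */
        atomic_store(&pasid_entry, 0);

        /* ... and make the cleared entry visible before any flush commands
         * are issued; this fence plays the role of the wmb() above. */
        atomic_thread_fence(memory_order_seq_cst);

        /* 2. Only now flush each device, so nothing can refill its caches
         *    from a stale, still-valid-looking entry while the mm goes away. */
        for (int dev = 0; dev < 2; dev++)
                flush_device_caches(dev);

        return 0;
}
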
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                goto out;
                        }
                        iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
-                       mm = NULL;
                } else
                        iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
                wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                                kfree_rcu(sdev, rcu);
 
                                if (list_empty(&svm->devs)) {
-                                       mmu_notifier_unregister(&svm->notifier, svm->mm);
 
                                        idr_remove(&svm->iommu->pasid_idr, svm->pasid);
                                        if (svm->mm)
-                                               mmput(svm->mm);
+                                               mmu_notifier_unregister(&svm->notifier, svm->mm);
+
                                        /* We mandate that no page faults may be outstanding
                                         * for the PASID when intel_svm_unbind_mm() is called.
                                         * If that is not obeyed, subtle errors will happen.
@@ -484,12 +501,33 @@ struct page_req_dsc {
 };
 
 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+
+static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
+{
+       unsigned long requested = 0;
+
+       if (req->exe_req)
+               requested |= VM_EXEC;
+
+       if (req->rd_req)
+               requested |= VM_READ;
+
+       if (req->wr_req)
+               requested |= VM_WRITE;
+
+       return (requested & ~vma->vm_flags) != 0;
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
        struct intel_iommu *iommu = d;
        struct intel_svm *svm = NULL;
        int head, tail, handled = 0;
 
+       /* Clear PPR bit before reading head/tail registers, to
+        * ensure that we get a new interrupt if needed. */
+       writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
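
The access_error() helper added in this hunk refuses to service a page request that asks for more access than the faulting VMA grants. Below is a standalone sketch of the same permission-mask check with stub structures so it can be compiled and run outside the kernel; the *_stub names are made up, and the VM_* values are simply chosen to mirror the kernel's read/write/exec bits.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

struct vma_stub      { unsigned long vm_flags; };
struct page_req_stub { unsigned int rd_req:1, wr_req:1, exe_req:1; };

/* Same check as access_error(): the fault is bad if it requests any
 * permission the VMA does not grant. */
static bool access_error_stub(struct vma_stub *vma, struct page_req_stub *req)
{
        unsigned long requested = 0;

        if (req->exe_req)
                requested |= VM_EXEC;
        if (req->rd_req)
                requested |= VM_READ;
        if (req->wr_req)
                requested |= VM_WRITE;

        return (requested & ~vma->vm_flags) != 0;
}

int main(void)
{
        struct vma_stub ro = { .vm_flags = VM_READ };
        struct page_req_stub write_req = { .rd_req = 1, .wr_req = 1 };

        /* A write request against a read-only VMA must be rejected. */
        printf("write to read-only vma rejected: %s\n",
               access_error_stub(&ro, &write_req) ? "yes" : "no");
        return 0;
}
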
@@ -534,11 +572,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                 * any faults on kernel addresses. */
                if (!svm->mm)
                        goto bad_req;
+               /* If the mm is already defunct, don't handle faults. */
+               if (!atomic_inc_not_zero(&svm->mm->mm_users))
+                       goto bad_req;
                down_read(&svm->mm->mmap_sem);
                vma = find_extend_vma(svm->mm, address);
                if (!vma || address < vma->vm_start)
                        goto invalid;
 
+               if (access_error(vma, req))
+                       goto invalid;
+
                ret = handle_mm_fault(svm->mm, vma, address,
                                      req->wr_req ? FAULT_FLAG_WRITE : 0);
                if (ret & VM_FAULT_ERROR)
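
Because the mm is now torn down through the notifier path, a page request can race with process exit, so the handler first tries to take a reference with atomic_inc_not_zero(&svm->mm->mm_users) and treats an already-defunct mm as a bad request; the matching mmput() is added in the next hunk once the fault has been serviced. A minimal user-space sketch of that "take a reference only if still live" pattern, using C11 atomics and invented names rather than the kernel's mm API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
        atomic_int users;       /* stand-in for mm->mm_users */
};

/* Take a reference only if the object still has at least one user,
 * i.e. the equivalent of atomic_inc_not_zero(&mm->mm_users). */
static bool try_get(struct object *obj)
{
        int old = atomic_load(&obj->users);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&obj->users, &old, old + 1))
                        return true;   /* reference taken */
        }
        return false;                  /* already defunct: do not touch it */
}

/* Drop the reference taken above, i.e. the equivalent of mmput(). */
static void put(struct object *obj)
{
        if (atomic_fetch_sub(&obj->users, 1) == 1)
                printf("last user gone, object can be freed\n");
}

int main(void)
{
        struct object live = { .users = 1 };
        struct object dead = { .users = 0 };

        if (try_get(&live)) {          /* handle the fault ... */
                put(&live);            /* ... then release the reference */
        }
        if (!try_get(&dead))
                printf("mm already defunct, fault rejected\n");
        return 0;
}
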
@@ -547,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                result = QI_RESP_SUCCESS;
        invalid:
                up_read(&svm->mm->mmap_sem);
+               mmput(svm->mm);
        bad_req:
                /* Accounting for major/minor faults? */
                rcu_read_lock();
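
Taken together with the earlier hunk, prq_event_thread() now clears the pending-page-request status bit before sampling the head and tail registers, so a request that lands after the snapshot raises a fresh interrupt instead of going unnoticed, and then walks descriptors from head to tail, masking offsets with PRQ_RING_MASK to stay inside the queue and aligned to the 16-byte descriptor size. The sketch below shows only that ack-before-read ring walk in user space; the ring size is fixed at one page and the names are invented, whereas the real queue scales with PRQ_ORDER.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 0x10                      /* one request descriptor: 16 bytes */
#define RING_SIZE  0x1000                    /* pretend the queue is one page    */
#define RING_MASK  (RING_SIZE - ENTRY_SIZE)  /* same shape as PRQ_RING_MASK      */

struct fake_req { uint64_t lo, hi; };        /* opaque 16-byte descriptor        */

static uint8_t ring[RING_SIZE];
static unsigned int head, tail;              /* byte offsets into the ring       */
static volatile int requests_pending;        /* stand-in for the PPR status bit  */

/* Producer side (the "hardware"): append one descriptor and raise the flag. */
static void enqueue(uint64_t lo, uint64_t hi)
{
        struct fake_req r = { lo, hi };

        memcpy(&ring[tail], &r, sizeof(r));
        tail = (tail + ENTRY_SIZE) & RING_MASK;
        requests_pending = 1;
}

/* Consumer side (the interrupt thread): ack first, then drain head..tail. */
static void drain(void)
{
        /* Ack the status before sampling head/tail: anything queued after
         * this point raises a new notification instead of being lost. */
        requests_pending = 0;

        while (head != tail) {
                struct fake_req r;

                memcpy(&r, &ring[head], sizeof(r));
                printf("handled request %#llx at offset 0x%x\n",
                       (unsigned long long)r.lo, head);
                head = (head + ENTRY_SIZE) & RING_MASK;
        }
}

int main(void)
{
        enqueue(0xabc, 0);
        enqueue(0xdef, 0);
        if (requests_pending)
                drain();
        return 0;
}
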