X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fpci%2Fintel-iommu.c;h=23e56a564e05056a208ac41d98828d32987865b7;hb=ca1ee219c070eab755712d50638bbcd1f8630fc1;hp=f3f686581a9026685dc3e6c994b00e88ed88486c;hpb=7df4f694356a125e4553ca35b215fd9e88e858c5;p=firefly-linux-kernel-4.4.55.git diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index f3f686581a90..23e56a564e05 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -164,7 +164,8 @@ static inline void context_clear_entry(struct context_entry *context) * 1: writable * 2-6: reserved * 7: super page - * 8-11: available + * 8-10: available + * 11: snoop behavior * 12-63: Host physcial address */ struct dma_pte { @@ -186,6 +187,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte) pte->val |= DMA_PTE_WRITE; } +static inline void dma_set_pte_snp(struct dma_pte *pte) +{ + pte->val |= DMA_PTE_SNP; +} + static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) { pte->val = (pte->val & ~3) | (prot & 3); @@ -231,6 +237,7 @@ struct dmar_domain { int flags; /* flags to find out type of domain */ int iommu_coherency;/* indicate coherency of iommu access */ + int iommu_snooping; /* indicate snooping control feature*/ int iommu_count; /* reference count of iommu */ spinlock_t iommu_lock; /* protect iommu set in domain */ u64 max_addr; /* maximum mapped address */ @@ -421,7 +428,6 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) return g_iommus[iommu_id]; } -/* "Coherency" capability may be different across iommus */ static void domain_update_iommu_coherency(struct dmar_domain *domain) { int i; @@ -438,6 +444,29 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) } } +static void domain_update_iommu_snooping(struct dmar_domain *domain) +{ + int i; + + domain->iommu_snooping = 1; + + i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); + for (; i < g_num_of_iommus; ) { + if (!ecap_sc_support(g_iommus[i]->ecap)) { + domain->iommu_snooping = 0; + break; + } + i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); + } +} + +/* Some capabilities may be different across iommus */ +static void domain_update_iommu_cap(struct dmar_domain *domain) +{ + domain_update_iommu_coherency(domain); + domain_update_iommu_snooping(domain); +} + static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn) { struct dmar_drhd_unit *drhd = NULL; @@ -689,15 +718,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) { int addr_width = agaw_to_width(domain->agaw); + int npages; start &= (((u64)1) << addr_width) - 1; end &= (((u64)1) << addr_width) - 1; /* in case it's partial page */ start = PAGE_ALIGN(start); end &= PAGE_MASK; + npages = (end - start) / VTD_PAGE_SIZE; /* we don't need lock here, nobody else touches the iova range */ - while (start < end) { + while (npages--) { dma_pte_clear_one(domain, start); start += VTD_PAGE_SIZE; } @@ -1004,194 +1035,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu) return 0; } -/* iommu interrupt handling. Most stuff are MSI-like. 
*/ - -static const char *fault_reason_strings[] = -{ - "Software", - "Present bit in root entry is clear", - "Present bit in context entry is clear", - "Invalid context entry", - "Access beyond MGAW", - "PTE Write access is not set", - "PTE Read access is not set", - "Next page table ptr is invalid", - "Root table address invalid", - "Context table ptr is invalid", - "non-zero reserved fields in RTP", - "non-zero reserved fields in CTP", - "non-zero reserved fields in PTE", -}; -#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1) - -const char *dmar_get_fault_reason(u8 fault_reason) -{ - if (fault_reason > MAX_FAULT_REASON_IDX) - return "Unknown"; - else - return fault_reason_strings[fault_reason]; -} - -void dmar_msi_unmask(unsigned int irq) -{ - struct intel_iommu *iommu = get_irq_data(irq); - unsigned long flag; - - /* unmask it */ - spin_lock_irqsave(&iommu->register_lock, flag); - writel(0, iommu->reg + DMAR_FECTL_REG); - /* Read a reg to force flush the post write */ - readl(iommu->reg + DMAR_FECTL_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -void dmar_msi_mask(unsigned int irq) -{ - unsigned long flag; - struct intel_iommu *iommu = get_irq_data(irq); - - /* mask it */ - spin_lock_irqsave(&iommu->register_lock, flag); - writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); - /* Read a reg to force flush the post write */ - readl(iommu->reg + DMAR_FECTL_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -void dmar_msi_write(int irq, struct msi_msg *msg) -{ - struct intel_iommu *iommu = get_irq_data(irq); - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - writel(msg->data, iommu->reg + DMAR_FEDATA_REG); - writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); - writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -void dmar_msi_read(int irq, struct msi_msg *msg) -{ - struct intel_iommu *iommu = get_irq_data(irq); - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - msg->data = readl(iommu->reg + DMAR_FEDATA_REG); - msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); - msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, unsigned long long addr) -{ - const char *reason; - - reason = dmar_get_fault_reason(fault_reason); - - printk(KERN_ERR - "DMAR:[%s] Request device [%02x:%02x.%d] " - "fault addr %llx \n" - "DMAR:[fault reason %02d] %s\n", - (type ? 
"DMA Read" : "DMA Write"), - (source_id >> 8), PCI_SLOT(source_id & 0xFF), - PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); - return 0; -} - -#define PRIMARY_FAULT_REG_LEN (16) -static irqreturn_t iommu_page_fault(int irq, void *dev_id) -{ - struct intel_iommu *iommu = dev_id; - int reg, fault_index; - u32 fault_status; - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - fault_status = readl(iommu->reg + DMAR_FSTS_REG); - - /* TBD: ignore advanced fault log currently */ - if (!(fault_status & DMA_FSTS_PPF)) - goto clear_overflow; - - fault_index = dma_fsts_fault_record_index(fault_status); - reg = cap_fault_reg_offset(iommu->cap); - while (1) { - u8 fault_reason; - u16 source_id; - u64 guest_addr; - int type; - u32 data; - - /* highest 32 bits */ - data = readl(iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN + 12); - if (!(data & DMA_FRCD_F)) - break; - - fault_reason = dma_frcd_fault_reason(data); - type = dma_frcd_type(data); - - data = readl(iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN + 8); - source_id = dma_frcd_source_id(data); - - guest_addr = dmar_readq(iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN); - guest_addr = dma_frcd_page_addr(guest_addr); - /* clear the fault */ - writel(DMA_FRCD_F, iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN + 12); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - - iommu_page_fault_do_one(iommu, type, fault_reason, - source_id, guest_addr); - - fault_index++; - if (fault_index > cap_num_fault_regs(iommu->cap)) - fault_index = 0; - spin_lock_irqsave(&iommu->register_lock, flag); - } -clear_overflow: - /* clear primary fault overflow */ - fault_status = readl(iommu->reg + DMAR_FSTS_REG); - if (fault_status & DMA_FSTS_PFO) - writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - return IRQ_HANDLED; -} - -int dmar_set_interrupt(struct intel_iommu *iommu) -{ - int irq, ret; - - irq = create_irq(); - if (!irq) { - printk(KERN_ERR "IOMMU: no free vectors\n"); - return -EINVAL; - } - - set_irq_data(irq, iommu); - iommu->irq = irq; - - ret = arch_setup_dmar_msi(irq); - if (ret) { - set_irq_data(irq, NULL); - iommu->irq = 0; - destroy_irq(irq); - return 0; - } - - /* Force fault register is cleared */ - iommu_page_fault(irq, iommu); - - ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu); - if (ret) - printk(KERN_ERR "IOMMU: can't request irq\n"); - return ret; -} static int iommu_init_domains(struct intel_iommu *iommu) { @@ -1429,6 +1272,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width) else domain->iommu_coherency = 0; + if (ecap_sc_support(iommu->ecap)) + domain->iommu_snooping = 1; + else + domain->iommu_snooping = 0; + domain->iommu_count = 1; /* always allocate the top pgd */ @@ -1557,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, spin_lock_irqsave(&domain->iommu_lock, flags); if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { domain->iommu_count++; - domain_update_iommu_coherency(domain); + domain_update_iommu_cap(domain); } spin_unlock_irqrestore(&domain->iommu_lock, flags); return 0; @@ -1657,6 +1505,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, BUG_ON(dma_pte_addr(pte)); dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); dma_set_pte_prot(pte, prot); + if (prot & DMA_PTE_SNP) + dma_set_pte_snp(pte); domain_flush_cache(domain, pte, sizeof(*pte)); start_pfn++; index++; @@ -1970,7 +1820,7 @@ static inline void 
iommu_prepare_isa(void) ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); if (ret) - printk("IOMMU: Failed to create 0-64M identity map, " + printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, " "floppy might not work\n"); } @@ -1987,7 +1837,7 @@ static int __init init_dmars(void) struct dmar_rmrr_unit *rmrr; struct pci_dev *pdev; struct intel_iommu *iommu; - int i, ret, unit = 0; + int i, ret; /* * for each drhd @@ -2043,11 +1893,40 @@ static int __init init_dmars(void) } } + /* + * Start from the sane iommu hardware state. + */ for_each_drhd_unit(drhd) { if (drhd->ignored) continue; iommu = drhd->iommu; + + /* + * If the queued invalidation is already initialized by us + * (for example, while enabling interrupt-remapping) then + * we got the things already rolling from a sane state. + */ + if (iommu->qi) + continue; + + /* + * Clear any previous faults. + */ + dmar_fault(-1, iommu); + /* + * Disable queued invalidation if supported and already enabled + * before OS handover. + */ + dmar_disable_qi(iommu); + } + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + if (dmar_enable_qi(iommu)) { /* * Queued Invalidate not enabled, use Register Based @@ -2109,7 +1988,6 @@ static int __init init_dmars(void) if (drhd->ignored) continue; iommu = drhd->iommu; - sprintf (iommu->name, "dmar%d", unit++); iommu_flush_write_buffer(iommu); @@ -2279,16 +2157,18 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, error: if (iova) __free_iova(&domain->iovad, iova); - printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", + printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", pci_name(pdev), size, (unsigned long long)paddr, dir); return 0; } -dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, - size_t size, int dir) +static dma_addr_t intel_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) { - return __intel_map_single(hwdev, paddr, size, dir, - to_pci_dev(hwdev)->dma_mask); + return __intel_map_single(dev, page_to_phys(page) + offset, size, + dir, to_pci_dev(dev)->dma_mask); } static void flush_unmaps(void) @@ -2352,8 +2232,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) spin_unlock_irqrestore(&async_umap_flush_lock, flags); } -void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, - int dir) +static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct dmar_domain *domain; @@ -2375,7 +2256,7 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, start_addr = iova->pfn_lo << PAGE_SHIFT; size = aligned_size((u64)dev_addr, size); - pr_debug("Device %s unmapping: %lx@%llx\n", + pr_debug("Device %s unmapping: %zx@%llx\n", pci_name(pdev), size, (unsigned long long)start_addr); /* clear the whole page */ @@ -2397,8 +2278,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, } } -void *intel_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) +static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + int dir) +{ + intel_unmap_page(dev, dev_addr, size, dir, NULL); +} + +static void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) { void *vaddr; int order; @@ 
-2421,8 +2308,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size, return NULL; } -void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle) +static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) { int order; @@ -2433,10 +2320,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, free_pages((unsigned long)vaddr, order); } -#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) - -void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) +static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs) { int i; struct pci_dev *pdev = to_pci_dev(hwdev); @@ -2444,7 +2330,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, unsigned long start_addr; struct iova *iova; size_t size = 0; - void *addr; + phys_addr_t addr; struct scatterlist *sg; struct intel_iommu *iommu; @@ -2460,7 +2346,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, if (!iova) return; for_each_sg(sglist, sg, nelems, i) { - addr = SG_ENT_VIRT_ADDRESS(sg); + addr = page_to_phys(sg_page(sg)) + sg->offset; size += aligned_size((u64)addr, sg->length); } @@ -2487,16 +2373,16 @@ static int intel_nontranslate_map_sg(struct device *hddev, for_each_sg(sglist, sg, nelems, i) { BUG_ON(!sg_page(sg)); - sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg)); + sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; sg->dma_length = sg->length; } return nelems; } -int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, - int dir) +static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, struct dma_attrs *attrs) { - void *addr; + phys_addr_t addr; int i; struct pci_dev *pdev = to_pci_dev(hwdev); struct dmar_domain *domain; @@ -2520,8 +2406,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, iommu = domain_get_iommu(domain); for_each_sg(sglist, sg, nelems, i) { - addr = SG_ENT_VIRT_ADDRESS(sg); - addr = (void *)virt_to_phys(addr); + addr = page_to_phys(sg_page(sg)) + sg->offset; size += aligned_size((u64)addr, sg->length); } @@ -2544,8 +2429,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, start_addr = iova->pfn_lo << PAGE_SHIFT; offset = 0; for_each_sg(sglist, sg, nelems, i) { - addr = SG_ENT_VIRT_ADDRESS(sg); - addr = (void *)virt_to_phys(addr); + addr = page_to_phys(sg_page(sg)) + sg->offset; size = aligned_size((u64)addr, sg->length); ret = domain_page_mapping(domain, start_addr + offset, ((u64)addr) & PAGE_MASK, @@ -2574,13 +2458,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, return nelems; } -static struct dma_mapping_ops intel_dma_ops = { +static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return !dma_addr; +} + +struct dma_map_ops intel_dma_ops = { .alloc_coherent = intel_alloc_coherent, .free_coherent = intel_free_coherent, - .map_single = intel_map_single, - .unmap_single = intel_unmap_single, .map_sg = intel_map_sg, .unmap_sg = intel_unmap_sg, + .map_page = intel_map_page, + .unmap_page = intel_unmap_page, + .mapping_error = intel_mapping_error, }; static inline int iommu_domain_cache_init(void) @@ -2772,6 +2662,33 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain, return 0; } +static void 
iommu_detach_dependent_devices(struct intel_iommu *iommu, + struct pci_dev *pdev) +{ + struct pci_dev *tmp, *parent; + + if (!iommu || !pdev) + return; + + /* dependent device detach */ + tmp = pci_find_upstream_pcie_bridge(pdev); + /* Secondary interface's bus number and devfn 0 */ + if (tmp) { + parent = pdev->bus->self; + while (parent != tmp) { + iommu_detach_dev(iommu, parent->bus->number, + parent->devfn); + parent = parent->bus->self; + } + if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ + iommu_detach_dev(iommu, + tmp->subordinate->number, 0); + else /* this is a legacy PCI bridge */ + iommu_detach_dev(iommu, + tmp->bus->number, tmp->devfn); + } +} + static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, struct pci_dev *pdev) { @@ -2797,6 +2714,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, spin_unlock_irqrestore(&device_domain_lock, flags); iommu_detach_dev(iommu, info->bus, info->devfn); + iommu_detach_dependent_devices(iommu, pdev); free_devinfo_mem(info); spin_lock_irqsave(&device_domain_lock, flags); @@ -2820,7 +2738,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, spin_lock_irqsave(&domain->iommu_lock, tmp_flags); clear_bit(iommu->seq_id, &domain->iommu_bmp); domain->iommu_count--; - domain_update_iommu_coherency(domain); + domain_update_iommu_cap(domain); spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); } @@ -2846,15 +2764,16 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) iommu = device_to_iommu(info->bus, info->devfn); iommu_detach_dev(iommu, info->bus, info->devfn); + iommu_detach_dependent_devices(iommu, info->dev); /* clear this iommu in iommu_bmp, update iommu count - * and coherency + * and capabilities */ spin_lock_irqsave(&domain->iommu_lock, flags2); if (test_and_clear_bit(iommu->seq_id, &domain->iommu_bmp)) { domain->iommu_count--; - domain_update_iommu_coherency(domain); + domain_update_iommu_cap(domain); } spin_unlock_irqrestore(&domain->iommu_lock, flags2); @@ -3077,6 +2996,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain, prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) prot |= DMA_PTE_WRITE; + if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) + prot |= DMA_PTE_SNP; max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); if (dmar_domain->max_addr < max_addr) { @@ -3130,6 +3051,17 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, return phys; } +static int intel_iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + struct dmar_domain *dmar_domain = domain->priv; + + if (cap == IOMMU_CAP_CACHE_COHERENCY) + return dmar_domain->iommu_snooping; + + return 0; +} + static struct iommu_ops intel_iommu_ops = { .domain_init = intel_iommu_domain_init, .domain_destroy = intel_iommu_domain_destroy, @@ -3138,6 +3070,7 @@ static struct iommu_ops intel_iommu_ops = { .map = intel_iommu_map_range, .unmap = intel_iommu_unmap_range, .iova_to_phys = intel_iommu_iova_to_phys, + .domain_has_cap = intel_iommu_domain_has_cap, }; static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
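
Notes on the changes above:

* Snoop Control in the page tables. Bit 11 of the DMA PTE, formerly just
  "available", now carries the snoop attribute: when set, DMA accesses
  translated through that PTE are snooped by the processor caches even if
  the device marked the request no-snoop. The flag itself is expected to be
  defined next to the existing read/write bits in intel-iommu.h; a minimal
  sketch, assuming that placement:

        /* PTE attribute bits; DMA_PTE_SNP assumed alongside READ/WRITE */
        #define DMA_PTE_READ    (1)
        #define DMA_PTE_WRITE   (2)
        #define DMA_PTE_SNP     (1 << 11)       /* bit 11: snoop behavior */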
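
* Per-domain capability tracking. A domain can span several IOMMUs, so its
  effective snooping capability is the AND of the Snoop Control (SC) bit
  across every unit set in domain->iommu_bmp; a single unit without SC
  clears iommu_snooping for the whole domain. The accessor is assumed to
  test ECAP bit 7, per the VT-d spec:

        /* assumed helper from intel-iommu.h: ECAP bit 7 is Snoop Control */
        #define ecap_sc_support(e)      (((e) >> 7) & 0x1)

  domain_update_iommu_cap() bundles this with the existing coherency
  recalculation so that every attach and detach path
  (domain_context_mapping_one(), vm_domain_remove_one_dev_info(),
  vm_domain_remove_all_dev_info()) keeps both bits current.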
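
* Exposing the capability through the generic IOMMU API.
  intel_iommu_map_range() honours IOMMU_CACHE only when the domain supports
  snooping, and the new domain_has_cap hook lets clients ask first. A
  consumer-side sketch in the style of KVM device assignment, using the
  generic-API names of this era (not code from this patch):

        #include <linux/iommu.h>

        static int map_one_page_coherently(struct iommu_domain *domain,
                                           unsigned long iova,
                                           phys_addr_t paddr)
        {
                int prot = IOMMU_READ | IOMMU_WRITE;

                /* only request snooped mappings where they can be honoured */
                if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
                        prot |= IOMMU_CACHE;

                return iommu_map_range(domain, iova, paddr, PAGE_SIZE, prot);
        }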
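
* dma_pte_clear_range() now counts pages up front. Deriving npages from
  (end - start) once and looping on the count is robust where the old
  "while (start < end)" is not: if the range reaches the very top of the
  address space, "start += VTD_PAGE_SIZE" can wrap to zero, the comparison
  becomes true again, and the loop never terminates. (This reading of the
  motivation is inferred from the code, not from a changelog.)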
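
* The MSI fault-reporting block (fault_reason_strings, dmar_msi_*(),
  iommu_page_fault(), dmar_set_interrupt()) leaves this file; in this tree
  it lives in dmar.c (as dmar_fault() and friends) so the
  interrupt-remapping code can share it. That move is what the new
  init_dmars() hunks rely on: dmar_fault(-1, iommu) is called directly once
  per unit purely to read and clear faults left over from the firmware
  handover (the irq argument is unused on that path), and dmar_disable_qi()
  backs off any queued invalidation the BIOS left enabled, before
  dmar_enable_qi() is attempted in a second pass. The "if (iommu->qi)" test
  skips units that interrupt remapping already brought to a sane state.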
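
* Conversion to struct dma_map_ops. The map_single/unmap_single entry
  points disappear in favour of map_page/unmap_page; the generic DMA layer
  decomposes a dma_map_single() call into a page plus offset before it
  reaches this driver. An illustrative sketch of the equivalence, assuming
  a lowmem kernel-virtual buffer:

        #include <linux/dma-mapping.h>
        #include <linux/mm.h>   /* virt_to_page(), offset_in_page() */

        static dma_addr_t map_single_via_map_page(struct device *dev,
                                                  void *ptr, size_t size,
                                                  enum dma_data_direction dir)
        {
                return intel_map_page(dev, virt_to_page(ptr),
                                      offset_in_page(ptr), size, dir, NULL);
        }

  intel_unmap_single() survives only as a static wrapper around
  intel_unmap_page() because intel_free_coherent() still calls it
  internally.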
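
* Scatterlists no longer go through kernel virtual addresses.
  SG_ENT_VIRT_ADDRESS()/virt_to_bus() require the buffer to be mapped in
  lowmem; deriving the bus address straight from the page works for highmem
  entries too, and matches what map_page now does. The two derivations side
  by side (illustration only):

        #include <linux/scatterlist.h>
        #include <linux/io.h>   /* virt_to_phys(), page_to_phys() */

        static phys_addr_t sg_addr_old(struct scatterlist *sg)
        {
                return virt_to_phys(sg_virt(sg));       /* needs a lowmem mapping */
        }

        static phys_addr_t sg_addr_new(struct scatterlist *sg)
        {
                return page_to_phys(sg_page(sg)) + sg->offset;  /* highmem-safe */
        }

  The second form is the same computation as the existing sg_phys() helper
  in <linux/scatterlist.h>.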
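
* Error reporting. __intel_map_single() returns 0 on failure, so the new
  intel_mapping_error() simply flags a zero handle. A hypothetical driver
  checks it through the generic wrapper:

        #include <linux/dma-mapping.h>
        #include <linux/errno.h>

        static int send_buffer(struct device *dev, struct page *page,
                               size_t len)
        {
                dma_addr_t handle;

                handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, handle))
                        return -EIO;    /* intel_mapping_error() saw 0 */

                /* ... hand "handle" to the device, wait for completion ... */

                dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
                return 0;
        }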
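
* iommu_detach_dependent_devices() closes an asymmetry: on attach,
  domain_context_mapping() programs context entries not only for the device
  itself but for every bridge between it and an upstream PCIe-to-PCI bridge
  (and for that bridge's secondary bus). Detaching only info->bus/
  info->devfn left those bridge entries stale, which broke re-attaching the
  same hierarchy to a different domain; both
  vm_domain_remove_one_dev_info() and vm_domain_remove_all_dev_info() now
  walk the same path on the way out.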
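
* Smaller cleanups folded in: printk sites gain a log level or switch to
  %zx for size_t values, the unused "unit" counter and the local sprintf()
  of iommu->name go away (the name is now assigned when the unit is
  allocated in dmar.c), and the stale single-capability comment above
  domain_update_iommu_coherency() is replaced by one above the new combined
  domain_update_iommu_cap().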