Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r-- | drivers/iommu/intel-iommu.c | 138
1 file changed, 97 insertions, 41 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..115ff26e9ced 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -1606,6 +1605,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+					struct dmar_domain *domain,
+					unsigned long pfn, unsigned int pages)
+{
+	/* It's a non-present to present mapping. Only flush if caching mode */
+	if (cap_caching_mode(iommu->cap))
+		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+	else
+		iommu_flush_write_buffer(iommu);
+}
+
 static void iommu_flush_iova(struct iova_domain *iovad)
 {
 	struct dmar_domain *domain;
@@ -2340,18 +2351,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			  struct scatterlist *sg, unsigned long phys_pfn,
+			  unsigned long nr_pages, int prot)
+{
+	int ret;
+	struct intel_iommu *iommu;
+
+	/* Do the real mapping first */
+	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+	if (ret)
+		return ret;
+
+	/* Notify about the new mapping */
+	if (domain_type_is_vm(domain)) {
+		/* VM typed domains can have more than one IOMMUs */
+		int iommu_id;
+		for_each_domain_iommu(iommu_id, domain) {
+			iommu = g_iommus[iommu_id];
+			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+		}
+	} else {
+		/* General domains only have one IOMMU */
+		iommu = domain_get_iommu(domain);
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
+
+	return 0;
+}
+
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				    struct scatterlist *sg, unsigned long nr_pages,
 				    int prot)
 {
-	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
 }
 
 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				     unsigned long phys_pfn, unsigned long nr_pages,
 				     int prot)
 {
-	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
@@ -2459,7 +2499,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	if (dev && dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(info->dev);
 
-		if (ecap_dev_iotlb_support(iommu->ecap) &&
+		if (!pci_ats_disabled() &&
+		    ecap_dev_iotlb_support(iommu->ecap) &&
 		    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
 		    dmar_find_matched_atsr_unit(pdev))
 			info->ats_supported = 1;
@@ -2533,7 +2574,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 	struct device_domain_info *info = NULL;
 	struct dmar_domain *domain = NULL;
 	struct intel_iommu *iommu;
-	u16 req_id, dma_alias;
+	u16 dma_alias;
 	unsigned long flags;
 	u8 bus, devfn;
 
@@ -2541,8 +2582,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 	if (!iommu)
 		return NULL;
 
-	req_id = ((u16)bus << 8) | devfn;
-
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
@@ -2656,9 +2695,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	 */
 	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
 
-	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
-				  last_vpfn - first_vpfn + 1,
-				  DMA_PTE_READ|DMA_PTE_WRITE);
+	return __domain_mapping(domain, first_vpfn, NULL,
+				first_vpfn, last_vpfn - first_vpfn + 1,
+				DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
 static int domain_prepare_identity_map(struct device *dev,
@@ -3177,7 +3216,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
 	/* This is too big for the stack - allocate it from slab */
 	ctxt_table_entries = ext ? 512 : 256;
 	ret = -ENOMEM;
-	ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
+	ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
 	if (!ctxt_tbls)
 		goto out_unmap;
 
@@ -3625,14 +3664,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 	if (ret)
 		goto error;
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain,
-				      mm_to_dma_pfn(iova_pfn),
-				      size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
 	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
 	start_paddr += paddr & ~PAGE_MASK;
 	return start_paddr;
@@ -3709,30 +3740,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
 
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
+
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -3819,12 +3881,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 		return 0;
 	}
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
 	return nelems;
 }
 
@@ -4034,7 +4090,7 @@ static int iommu_suspend(void)
 	unsigned long flag;
 
 	for_each_active_iommu(iommu, drhd) {
-		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+		iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
 					     GFP_ATOMIC);
 		if (!iommu->iommu_state)
 			goto nomem;
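
The reworked intel_alloc_coherent()/intel_free_coherent() above drop the dma_direct_alloc() round trip in favor of allocating pages directly: try the CMA area when the context may block, fall back to the page allocator, and release in reverse order. A minimal sketch of that pattern, condensed from the hunk above using the same kernel APIs the patch calls (the helper names coherent_alloc_pages/coherent_free_pages are hypothetical, not part of the patch):

/*
 * Sketch only: condensed from the intel_alloc_coherent() hunk above.
 * The GFP zone fixups, zeroing, and the coherent_dma_mask re-check
 * performed by the real code are omitted.
 */
static struct page *coherent_alloc_pages(struct device *dev, size_t size,
					 gfp_t flags)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int order = get_order(size);
	struct page *page = NULL;

	/* CMA allocation may sleep, so only try it when blocking is allowed */
	if (gfpflags_allow_blocking(flags))
		page = dma_alloc_from_contiguous(dev, count, order, flags);

	/* Fall back to the buddy allocator */
	if (!page)
		page = alloc_pages(flags, order);
	return page;
}

/* Freeing mirrors allocation: offer pages back to CMA first, then buddy */
static void coherent_free_pages(struct device *dev, struct page *page,
				size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}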
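Both kzalloc() -> kcalloc() conversions in this diff follow the overflow-safe array-allocation idiom: kzalloc(n * size, ...) computes the product with unchecked arithmetic and can wrap, silently returning an undersized buffer, while kcalloc(n, size, ...) returns NULL on multiplication overflow and zeroes the memory just like kzalloc(). A hypothetical call site mirroring the iommu_suspend() hunk (the regs variable is illustrative only, not from the patch):

	/* Overflow-checked, zeroed allocation of MAX_SR_DMAR_REGS u32s;
	 * fails with NULL instead of wrapping the size computation. */
	u32 *regs = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;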