Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--   drivers/iommu/dma-iommu.c   74
1 file changed, 19 insertions(+), 55 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4078358ed66e..af765c813cc8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -51,6 +51,8 @@ struct iommu_dma_cookie {
 	struct iommu_domain	*fq_domain;
 };
 
+static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
 		struct iommu_domain *domain)
 {
@@ -309,6 +311,11 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
 	domain->ops->flush_iotlb_all(domain);
 }
 
+static bool dev_is_untrusted(struct device *dev)
+{
+	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -363,8 +370,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
 
-	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
-			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+	if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
+	    !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
+	    attr) {
 		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
 					  iommu_dma_entry_dtor))
 			pr_warn("iova flush queue initialization failed\n");
@@ -378,21 +386,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	return iova_reserve_iommu_regions(dev, domain);
 }
 
-static int iommu_dma_deferred_attach(struct device *dev,
-		struct iommu_domain *domain)
-{
-	const struct iommu_ops *ops = domain->ops;
-
-	if (!is_kdump_kernel())
-		return 0;
-
-	if (unlikely(ops->is_attach_deferred &&
-			ops->is_attach_deferred(domain, dev)))
-		return iommu_attach_device(domain, dev);
-
-	return 0;
-}
-
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
@@ -521,11 +514,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
 			iova_align(iovad, size), dir, attrs);
 }
 
-static bool dev_is_untrusted(struct device *dev)
-{
-	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
-}
-
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size_t size, int prot, u64 dma_mask)
 {
@@ -535,7 +523,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_deferred_attach(dev, domain))
 		return DMA_MAPPING_ERROR;
 
 	size = iova_align(iovad, size + iova_off);
@@ -693,7 +682,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
 	*dma_handle = DMA_MAPPING_ERROR;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_deferred_attach(dev, domain))
 		return NULL;
 
 	min_size = alloc_sizes & -alloc_sizes;
@@ -976,7 +966,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_deferred_attach(dev, domain))
 		return 0;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1197,34 +1188,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	return cpu_addr;
 }
 
-#ifdef CONFIG_DMA_REMAP
-static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
-		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
-{
-	if (!gfpflags_allow_blocking(gfp)) {
-		struct page *page;
-
-		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
-		if (!page)
-			return NULL;
-		return page_address(page);
-	}
-
-	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
-				     PAGE_KERNEL, 0);
-}
-
-static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
-{
-	__iommu_dma_unmap(dev, handle, size);
-	__iommu_dma_free(dev, size, cpu_addr);
-}
-#else
-#define iommu_dma_alloc_noncoherent NULL
-#define iommu_dma_free_noncoherent NULL
-#endif /* CONFIG_DMA_REMAP */
-
 static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
@@ -1295,8 +1258,6 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,
 	.free_pages		= dma_common_free_pages,
-	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
-	.free_noncoherent	= iommu_dma_free_noncoherent,
 	.mmap			= iommu_dma_mmap,
 	.get_sgtable		= iommu_dma_get_sgtable,
 	.map_page		= iommu_dma_map_page,
@@ -1424,6 +1385,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 
 static int iommu_dma_init(void)
 {
+	if (is_kdump_kernel())
+		static_branch_enable(&iommu_deferred_attach_enabled);
+
 	return iova_cache_get();
 }
 arch_initcall(iommu_dma_init);
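
Note on the mechanism: the patch replaces a per-mapping is_kdump_kernel() test with a static key that is flipped once at boot. Below is a minimal, self-contained sketch of that jump-label pattern; the example_* names are illustrative and not part of this commit, but DEFINE_STATIC_KEY_FALSE(), static_branch_enable() and static_branch_unlikely() are the real <linux/jump_label.h> API the diff uses.

/*
 * Sketch of the static-key pattern applied by this patch. While the
 * key is false, static_branch_unlikely() compiles to a NOP, so the
 * common (non-kdump) case pays nothing in the DMA hot paths; enabling
 * the key patches the NOP into a jump at runtime.
 */
#include <linux/jump_label.h>
#include <linux/crash_dump.h>

static DEFINE_STATIC_KEY_FALSE(example_deferred_enabled);

static int __init example_init(void)
{
	/* Decided once at boot: only a kdump (crash-capture) kernel flips the key. */
	if (is_kdump_kernel())
		static_branch_enable(&example_deferred_enabled);
	return 0;
}

static bool example_should_defer(void)
{
	/* A NOP until static_branch_enable() is called, then a jump. */
	return static_branch_unlikely(&example_deferred_enabled);
}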
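
The hot paths now call iommu_deferred_attach(), whose definition is not part of this diff (it moves into the IOMMU core alongside this change). Judging from the iommu_dma_deferred_attach() body deleted above, the core helper is presumably the same logic minus the is_kdump_kernel() check that the static key now handles, roughly:

/*
 * Assumed shape of the core-side helper, reconstructed from the code
 * removed above; the actual definition lives outside this file and
 * may differ in detail.
 */
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (unlikely(ops->is_attach_deferred &&
		     ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}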