Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r-- | sound/core/memalloc.c | 177
1 file changed, 134 insertions, 43 deletions
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 439a358ecfe9..c7c943c661e6 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -29,12 +29,12 @@ static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
 		return (__force gfp_t)(unsigned long)dmab->dev.dev;
 }
 
-static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
+static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 {
 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
 
 	if (WARN_ON_ONCE(!ops || !ops->alloc))
-		return -EINVAL;
+		return NULL;
 	return ops->alloc(dmab, size);
 }
 
@@ -54,8 +54,6 @@ static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 			struct snd_dma_buffer *dmab)
 {
-	int err;
-
 	if (WARN_ON(!size))
 		return -ENXIO;
 	if (WARN_ON(!dmab))
@@ -65,12 +63,9 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	dmab->dev.type = type;
 	dmab->dev.dev = device;
 	dmab->bytes = 0;
-	dmab->area = NULL;
 	dmab->addr = 0;
 	dmab->private_data = NULL;
-	err = __snd_dma_alloc_pages(dmab, size);
-	if (err < 0)
-		return err;
+	dmab->area = __snd_dma_alloc_pages(dmab, size);
 	if (!dmab->area)
 		return -ENOMEM;
 	dmab->bytes = size;
@@ -127,6 +122,52 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 }
 EXPORT_SYMBOL(snd_dma_free_pages);
 
+/* called by devres */
+static void __snd_release_pages(struct device *dev, void *res)
+{
+	snd_dma_free_pages(res);
+}
+
+/**
+ * snd_devm_alloc_pages - allocate the buffer and manage with devres
+ * @dev: the device pointer
+ * @type: the DMA buffer type
+ * @size: the buffer size to allocate
+ *
+ * Allocate buffer pages depending on the given type and manage using devres.
+ * The pages will be released automatically at the device removal.
+ *
+ * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
+ * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
+ * SNDRV_DMA_TYPE_VMALLOC type.
+ *
+ * The function returns the snd_dma_buffer object at success, or NULL if failed.
+ */
+struct snd_dma_buffer *
+snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+{
+	struct snd_dma_buffer *dmab;
+	int err;
+
+	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
+		    type == SNDRV_DMA_TYPE_VMALLOC))
+		return NULL;
+
+	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
+	if (!dmab)
+		return NULL;
+
+	err = snd_dma_alloc_pages(type, dev, size, dmab);
+	if (err < 0) {
+		devres_free(dmab);
+		return NULL;
+	}
+
+	devres_add(dev, dmab);
+	return dmab;
+}
+EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
+
 /**
  * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
  * @dmab: buffer allocation information
@@ -198,12 +239,14 @@ EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
 /*
  * Continuous pages allocator
  */
-static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+	void *p = alloc_pages_exact(size, gfp);
 
-	dmab->area = alloc_pages_exact(size, gfp);
-	return 0;
+	if (p)
+		dmab->addr = page_to_phys(virt_to_page(p));
+	return p;
 }
 
 static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
@@ -215,7 +258,7 @@ static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
 				   struct vm_area_struct *area)
 {
 	return remap_pfn_range(area, area->vm_start,
-			       page_to_pfn(virt_to_page(dmab->area)),
+			       dmab->addr >> PAGE_SHIFT,
 			       area->vm_end - area->vm_start,
 			       area->vm_page_prot);
 }
@@ -229,12 +272,11 @@ static const struct snd_malloc_ops snd_dma_continuous_ops = {
 /*
  * VMALLOC allocator
  */
-static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
 
-	dmab->area = __vmalloc(size, gfp);
-	return 0;
+	return __vmalloc(size, gfp);
 }
 
 static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
@@ -248,11 +290,13 @@ static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
 	return remap_vmalloc_range(area, dmab->area, 0);
 }
 
+#define get_vmalloc_page_addr(dmab, offset) \
+	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
+
 static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
 					   size_t offset)
 {
-	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
-		offset % PAGE_SIZE;
+	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
 }
 
 static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
@@ -265,11 +309,23 @@ static unsigned int snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
 						   unsigned int ofs,
 						   unsigned int size)
 {
-	ofs %= PAGE_SIZE;
-	size += ofs;
-	if (size > PAGE_SIZE)
-		size = PAGE_SIZE;
-	return size - ofs;
+	unsigned int start, end;
+	unsigned long addr;
+
+	start = ALIGN_DOWN(ofs, PAGE_SIZE);
+	end = ofs + size - 1; /* the last byte address */
+	/* check page continuity */
+	addr = get_vmalloc_page_addr(dmab, start);
+	for (;;) {
+		start += PAGE_SIZE;
+		if (start > end)
+			break;
+		addr += PAGE_SIZE;
+		if (get_vmalloc_page_addr(dmab, start) != addr)
+			return start - ofs;
+	}
+	/* ok, all on continuous pages */
+	return size;
 }
 
 static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
@@ -286,20 +342,20 @@ static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
  * IRAM allocator
  */
 #ifdef CONFIG_GENERIC_ALLOCATOR
-static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct device *dev = dmab->dev.dev;
 	struct gen_pool *pool;
+	void *p;
 
 	if (dev->of_node) {
 		pool = of_gen_pool_get(dev->of_node, "iram", 0);
 		/* Assign the pool into private_data field */
 		dmab->private_data = pool;
 
-		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
-						      PAGE_SIZE);
-		if (dmab->area)
-			return 0;
+		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
+		if (p)
+			return p;
 	}
 
 	/* Internal memory might have limited size and no enough space,
@@ -334,31 +390,31 @@ static const struct snd_malloc_ops snd_dma_iram_ops = {
 };
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 
+#define DEFAULT_GFP \
+	(GFP_KERNEL | \
+	 __GFP_COMP |    /* compound page lets parts be mapped */ \
+	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
+	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */
+
 /*
  * Coherent device pages allocator
  */
-static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-	gfp_t gfp_flags;
-
-	gfp_flags = GFP_KERNEL
-		| __GFP_COMP	/* compound page lets parts be mapped */
-		| __GFP_NORETRY /* don't trigger OOM-killer */
-		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
-	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
-					gfp_flags);
+	void *p;
+
+	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
 #ifdef CONFIG_X86
-	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-		set_memory_wc((unsigned long)dmab->area,
-			      PAGE_ALIGN(size) >> PAGE_SHIFT);
+	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
+		set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
 #endif
-	return 0;
+	return p;
 }
 
 static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 {
 #ifdef CONFIG_X86
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
 		set_memory_wb((unsigned long)dmab->area,
 			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
 #endif
@@ -368,6 +424,10 @@ static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
 			    struct vm_area_struct *area)
 {
+#ifdef CONFIG_X86
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
+		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+#endif
 	return dma_mmap_coherent(dmab->dev.dev, area,
 				 dmab->area, dmab->addr, dmab->bytes);
 }
@@ -377,6 +437,37 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
 	.free = snd_dma_dev_free,
 	.mmap = snd_dma_dev_mmap,
 };
+
+/*
+ * Write-combined pages
+ */
+#ifdef CONFIG_X86
+/* On x86, share the same ops as the standard dev ops */
+#define snd_dma_wc_ops	snd_dma_dev_ops
+#else /* CONFIG_X86 */
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
+}
+
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+			   struct vm_area_struct *area)
+{
+	return dma_mmap_wc(dmab->dev.dev, area,
+			   dmab->area, dmab->addr, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_wc_ops = {
+	.alloc = snd_dma_wc_alloc,
+	.free = snd_dma_wc_free,
+	.mmap = snd_dma_wc_mmap,
+};
+#endif /* CONFIG_X86 */
 #endif /* CONFIG_HAS_DMA */
 
 /*
@@ -387,14 +478,14 @@ static const struct snd_malloc_ops *dma_ops[] = {
 	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
 #ifdef CONFIG_HAS_DMA
 	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
-	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
+	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 #endif /* CONFIG_HAS_DMA */
 #ifdef CONFIG_SND_DMA_SGBUF
 	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
-	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
 #endif
 };
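
Usage note (editor's sketch, not part of the diff): a minimal example of how a driver's probe might use the devres-managed allocator added above instead of pairing snd_dma_alloc_pages() with an explicit snd_dma_free_pages() in its error and remove paths. The probe callback name and the 64 KiB buffer size are hypothetical; only snd_devm_alloc_pages(), SNDRV_DMA_TYPE_DEV and the snd_dma_buffer fields come from the code in this diff.

	#include <linux/device.h>
	#include <sound/memalloc.h>

	/* hypothetical probe callback of a sound driver */
	static int my_chip_probe(struct device *dev)
	{
		struct snd_dma_buffer *dmab;

		/*
		 * Allocate 64 KiB of coherent DMA memory tied to the device's
		 * lifetime; devres releases it automatically when the device
		 * is unbound, so no snd_dma_free_pages() call is needed.
		 */
		dmab = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 64 * 1024);
		if (!dmab)
			return -ENOMEM;

		/* dmab->area is the CPU address, dmab->addr the DMA address */
		return 0;
	}

As the kernel-doc above notes, snd_devm_alloc_pages() rejects SNDRV_DMA_TYPE_CONTINUOUS and SNDRV_DMA_TYPE_VMALLOC, since those types carry no real struct device to attach the devres entry to.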