From 9afae2719273fa1d406829bf3498f82dbdba71c7 Mon Sep 17 00:00:00 2001 From: "Xiangliang.Yu" Date: Wed, 16 Aug 2017 14:25:51 +0800 Subject: drm/ttm: Fix accounting error when failing to get pages for pool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we fail to get the pages needed for the pool, the pages that were already allocated have to be put back into the pool. The current code miscounts those pages; correct it. Signed-off-by: Xiangliang.Yu Reviewed-by: Christian König Reviewed-by: Monk Liu Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index eeddc1e48409..871599826773 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, } else { pr_err("Failed to fill pool (%p)\n", pool); /* If we have any pages left put them to the pool. */ - list_for_each_entry(p, &pool->list, lru) { + list_for_each_entry(p, &new_pages, lru) { ++cpages; } list_splice(&new_pages, &pool->list); -- cgit v1.2.3 From a4dec819c8bba6365eb893a4ca88db4dd1210110 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 18 Aug 2017 10:04:57 -0400 Subject: drm/ttm: Add helper functions to populate/map in one call (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions replace a section of common code found in radeon/amdgpu drivers (and possibly others) as part of the ttm_tt_*populate() callbacks. v2: squash in fix for sw iommu from Tom Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 41 ++++++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_page_alloc.h | 21 ++++++++++++++++++ 2 files changed, 62 insertions(+) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 871599826773..6a660d196d87 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -920,6 +920,47 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_pool_unpopulate); +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) +{ + unsigned i; + int r; + + r = ttm_pool_populate(&tt->ttm); + if (r) + return r; + + for (i = 0; i < tt->ttm.num_pages; i++) { + tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], + 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, tt->dma_address[i])) { + while (i--) { + dma_unmap_page(dev, tt->dma_address[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + tt->dma_address[i] = 0; + } + ttm_pool_unpopulate(&tt->ttm); + return -EFAULT; + } + } + return 0; +} +EXPORT_SYMBOL(ttm_populate_and_map_pages); + +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) +{ + unsigned i; + + for (i = 0; i < tt->ttm.num_pages; i++) { + if (tt->dma_address[i]) { + dma_unmap_page(dev, tt->dma_address[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + } + } + ttm_pool_unpopulate(&tt->ttm); +} +EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); + int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { struct ttm_page_pool *p; diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 49a828425fa2..bf21166f2b97 100644 ---
a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -83,6 +83,17 @@ extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); + +/** + * Populates and DMA maps pages to fulfill a ttm_dma_populate() request + */ +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); + +/** + * Unpopulates and DMA unmaps pages as part of a + * ttm_dma_unpopulate() request */ +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); + #else static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) @@ -105,6 +116,16 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) { } + +static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) +{ + return -ENOMEM; +} + +static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) +{ +} + #endif #endif -- cgit v1.2.3 From 7a9667ae197460e6c9c3bb432fe68c708fce6259 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 5 Sep 2017 07:30:59 -0400 Subject: drm/ttm: Fix configuration error around populate_and_map() functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix kbuild errors when IOMMU/SWIOTLB are disabled. Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 6a660d196d87..052e1f102113 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -920,6 +920,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_pool_unpopulate); +#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) { unsigned i; @@ -960,6 +961,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) ttm_pool_unpopulate(&tt->ttm); } EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); +#endif int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { -- cgit v1.2.3 From f9ebec52b5f115a0b06cdabe3036b858bfbb588a Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 18 Sep 2017 15:45:11 +0200 Subject: drm/ttm: remove unused options from ttm_mem_global_alloc_page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Nobody is actually using them; remove them.
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_memory.c | 6 ++---- drivers/gpu/drm/ttm/ttm_page_alloc.c | 3 +-- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3 +-- include/drm/ttm/ttm_memory.h | 3 +-- 4 files changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 29855be96be0..4f9978cbc5bc 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -546,8 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, EXPORT_SYMBOL(ttm_mem_global_alloc); int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, - bool no_wait, bool interruptible) + struct page *page) { struct ttm_mem_zone *zone = NULL; @@ -564,8 +563,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) zone = glob->zone_kernel; #endif - return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait, - interruptible); + return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, false, false); } void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 052e1f102113..74f465ea914b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -882,8 +882,7 @@ int ttm_pool_populate(struct ttm_tt *ttm) return -ENOMEM; } - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - false, false); + ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]); if (unlikely(ret != 0)) { ttm_pool_unpopulate(ttm); return -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 06bc14b55e66..b8905bdd4143 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -902,8 +902,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev) return -ENOMEM; } - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - false, false); + ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]); if (unlikely(ret != 0)) { ttm_dma_unpopulate(ttm_dma, dev); return -ENOMEM; diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index c4520890f267..8184ff144176 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -150,8 +150,7 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, extern void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, - bool no_wait, bool interruptible); + struct page *page); extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page); extern size_t ttm_round_pot(size_t size); -- cgit v1.2.3 From d188bfa5532ce5b426681d8530ff1a9683eea0ad Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 4 Jul 2017 16:56:24 +0200 Subject: drm/ttm: add support for different pool sizes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Correctly handle different page sizes in the memory accounting. 
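For illustration only (this sketch is not part of the patch: the huge-page call site only appears later in this series, and HPAGE_PMD_NR is only meaningful with CONFIG_TRANSPARENT_HUGEPAGE), the new size parameter lets a single accounting call cover an arbitrarily large chunk:

	/* charge a single 4 KiB page against its zone */
	ret = ttm_mem_global_alloc_page(mem_glob, page, PAGE_SIZE);

	/* charge a 2 MiB huge page with one call instead of 512 */
	ret = ttm_mem_global_alloc_page(mem_glob, huge_page,
					HPAGE_PMD_NR * PAGE_SIZE);
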
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_memory.c | 10 +++++----- drivers/gpu/drm/ttm/ttm_page_alloc.c | 5 +++-- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 7 ++++--- include/drm/ttm/ttm_memory.h | 4 ++-- 4 files changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 4f9978cbc5bc..e96374990398 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -546,7 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, EXPORT_SYMBOL(ttm_mem_global_alloc); int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page) + struct page *page, uint64_t size) { struct ttm_mem_zone *zone = NULL; @@ -563,10 +563,11 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) zone = glob->zone_kernel; #endif - return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, false, false); + return ttm_mem_global_alloc_zone(glob, zone, size, false, false); } -void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page) +void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, + uint64_t size) { struct ttm_mem_zone *zone = NULL; @@ -577,10 +578,9 @@ void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page) if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) zone = glob->zone_kernel; #endif - ttm_mem_global_free_zone(glob, zone, PAGE_SIZE); + ttm_mem_global_free_zone(glob, zone, size); } - size_t ttm_round_pot(size_t size) { if ((size & (size - 1)) == 0) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 74f465ea914b..e11fd76e06f4 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -882,7 +882,8 @@ int ttm_pool_populate(struct ttm_tt *ttm) return -ENOMEM; } - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]); + ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], + PAGE_SIZE); if (unlikely(ret != 0)) { ttm_pool_unpopulate(ttm); return -ENOMEM; @@ -909,7 +910,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; ++i) { if (ttm->pages[i]) { ttm_mem_global_free_page(ttm->glob->mem_glob, - ttm->pages[i]); + ttm->pages[i], PAGE_SIZE); ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags, ttm->caching_state); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index b8905bdd4143..53626578004c 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -902,7 +902,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev) return -ENOMEM; } - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]); + ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], + pool->size); if (unlikely(ret != 0)) { ttm_dma_unpopulate(ttm_dma, dev); return -ENOMEM; @@ -967,13 +968,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) if (is_cached) { list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) { ttm_mem_global_free_page(ttm->glob->mem_glob, - d_page->p); + d_page->p, pool->size); ttm_dma_page_put(pool, d_page); } } else { for (i = 0; i < count; i++) { ttm_mem_global_free_page(ttm->glob->mem_glob, - ttm->pages[i]); + ttm->pages[i], pool->size); } } diff --git 
a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 8184ff144176..2c1e3598effe 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -150,9 +150,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, extern void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page); + struct page *page, uint64_t size); extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page); + struct page *page, uint64_t size); extern size_t ttm_round_pot(size_t size); extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob); #endif -- cgit v1.2.3 From c6e839a3e299bbff991a3b4136f96ccaca4b276f Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 19 Sep 2017 15:20:42 +0200 Subject: drm/ttm: allocate/free multiple pages in a single call MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Quite surprisingly, this is more efficient than doing it page by page. Signed-off-by: Christian König Acked-by: Felix Kuehling Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index e11fd76e06f4..482dd9aa2c84 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -873,15 +873,14 @@ int ttm_pool_populate(struct ttm_tt *ttm) if (ttm->state != tt_unpopulated) return 0; - for (i = 0; i < ttm->num_pages; ++i) { - ret = ttm_get_pages(&ttm->pages[i], 1, - ttm->page_flags, - ttm->caching_state); - if (ret != 0) { - ttm_pool_unpopulate(ttm); - return -ENOMEM; - } + ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, + ttm->caching_state); + if (unlikely(ret != 0)) { + ttm_pool_unpopulate(ttm); + return ret; + } + for (i = 0; i < ttm->num_pages; ++i) { ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], PAGE_SIZE); if (unlikely(ret != 0)) { @@ -908,14 +907,14 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) unsigned i; for (i = 0; i < ttm->num_pages; ++i) { - if (ttm->pages[i]) { - ttm_mem_global_free_page(ttm->glob->mem_glob, - ttm->pages[i], PAGE_SIZE); - ttm_put_pages(&ttm->pages[i], 1, - ttm->page_flags, - ttm->caching_state); - } + if (!ttm->pages[i]) + continue; + + ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i], + PAGE_SIZE); } + ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, + ttm->caching_state); ttm->state = tt_unpopulated; } EXPORT_SYMBOL(ttm_pool_unpopulate); -- cgit v1.2.3 From 6056a1a565547743c5a87dc3d9c51d086acf9c27 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 20 Sep 2017 14:07:02 +0200 Subject: drm/ttm: DMA map/unmap consecutive pages as a whole v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of mapping pages one at a time, map/unmap all consecutive pages in a single call. v2: test for consecutive pages instead of using compound page order.
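To illustrate the consecutiveness test mentioned in v2, here is a stand-alone sketch (ttm_run_length is a hypothetical helper, not part of the patch): struct page pointers that are adjacent in the page array map to physically contiguous pages, so one dma_map_page() call can cover the whole run.

	/* Length of the run of physically consecutive pages starting at
	 * index i; mirrors the ++p != pages[j] test in the diff below. */
	static unsigned ttm_run_length(struct page **pages, unsigned i,
				       unsigned num_pages)
	{
		struct page *p = pages[i];
		unsigned run = 1;

		while (i + run < num_pages && ++p == pages[i + run])
			++run;

		return run;
	}
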
Signed-off-by: Christian König Acked-by: Felix Kuehling Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 48 +++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 482dd9aa2c84..6c852e81660b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -922,16 +922,26 @@ EXPORT_SYMBOL(ttm_pool_unpopulate); #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) { - unsigned i; + unsigned i, j; int r; r = ttm_pool_populate(&tt->ttm); if (r) return r; - for (i = 0; i < tt->ttm.num_pages; i++) { + for (i = 0; i < tt->ttm.num_pages; ++i) { + struct page *p = tt->ttm.pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < tt->ttm.num_pages; ++j) { + if (++p != tt->ttm.pages[j]) + break; + + ++num_pages; + } + tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], - 0, PAGE_SIZE, + 0, num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, tt->dma_address[i])) { while (i--) { @@ -942,6 +952,11 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) ttm_pool_unpopulate(&tt->ttm); return -EFAULT; } + + for (j = 1; j < num_pages; ++j) { + tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE; + ++i; + } } return 0; } @@ -949,13 +964,28 @@ EXPORT_SYMBOL(ttm_populate_and_map_pages); void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) { - unsigned i; - - for (i = 0; i < tt->ttm.num_pages; i++) { - if (tt->dma_address[i]) { - dma_unmap_page(dev, tt->dma_address[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); + unsigned i, j; + + for (i = 0; i < tt->ttm.num_pages;) { + struct page *p = tt->ttm.pages[i]; + size_t num_pages = 1; + + if (!tt->dma_address[i] || !tt->ttm.pages[i]) { + ++i; + continue; } + + for (j = i + 1; j < tt->ttm.num_pages; ++j) { + if (++p != tt->ttm.pages[j]) + break; + + ++num_pages; + } + + dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE, + DMA_BIDIRECTIONAL); + + i += num_pages; } ttm_pool_unpopulate(&tt->ttm); } -- cgit v1.2.3 From 0284f1ead87463bc17cf5e81a24fc65c052486f3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 20 Sep 2017 15:06:12 +0200 Subject: drm/ttm: add transparent huge page support for cached allocations v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to allocate huge pages when it makes sense. v2: avoid compound pages for now Signed-off-by: Christian König Acked-by: Felix Kuehling Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 50 ++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 6c852e81660b..1bc6053b4581 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -685,12 +685,24 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, if (pool == NULL) { /* No pool for this memory type so free the pages */ - for (i = 0; i < npages; i++) { - if (pages[i]) { - if (page_count(pages[i]) != 1) - pr_err("Erroneous page count. 
Leaking pages.\n"); - __free_page(pages[i]); - pages[i] = NULL; + i = 0; + while (i < npages) { + unsigned order; + + if (!pages[i]) { + ++i; + continue; + } + + if (page_count(pages[i]) != 1) + pr_err("Erroneous page count. Leaking pages.\n"); + order = compound_order(pages[i]); + __free_pages(pages[i], order); + + order = 1 << order; + while (order) { + pages[i++] = NULL; + --order; } } return; @@ -740,12 +752,33 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, /* No pool for cached pages */ if (pool == NULL) { + unsigned i, j; + if (flags & TTM_PAGE_FLAG_DMA32) gfp_flags |= GFP_DMA32; else gfp_flags |= GFP_HIGHUSER; - for (r = 0; r < npages; ++r) { + i = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + while (npages >= HPAGE_PMD_NR) { + gfp_t huge_flags = gfp_flags; + + huge_flags |= GFP_TRANSHUGE; + huge_flags &= ~__GFP_MOVABLE; + huge_flags &= ~__GFP_COMP; + p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); + if (!p) + break; + + for (j = 0; j < HPAGE_PMD_NR; ++j) + pages[i++] = p++; + + npages -= HPAGE_PMD_NR; + } +#endif + + while (npages) { p = alloc_page(gfp_flags); if (!p) { @@ -753,7 +786,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, return -ENOMEM; } - pages[r] = p; + pages[i++] = p; + --npages; } return 0; } -- cgit v1.2.3 From 8593e9b85e0aa67db62ec395774021a139efc2cd Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 21 Sep 2017 11:28:25 +0200 Subject: drm/ttm: move more logic into ttm_page_pool_get_pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it easier to add huge page pool. Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 98 +++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 1bc6053b4581..39747326bf3e 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -627,19 +627,20 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, } /** - * Cut 'count' number of pages from the pool and put them on the return list. + * Allocate pages from the pool and put them on the return list. * - * @return count of pages still required to fulfill the request. + * @return zero for success or negative error code. */ -static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, - struct list_head *pages, - int ttm_flags, - enum ttm_caching_state cstate, - unsigned count) +static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, + struct list_head *pages, + int ttm_flags, + enum ttm_caching_state cstate, + unsigned count) { unsigned long irq_flags; struct list_head *p; unsigned i; + int r = 0; spin_lock_irqsave(&pool->lock, irq_flags); ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); @@ -672,7 +673,35 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, count = 0; out: spin_unlock_irqrestore(&pool->lock, irq_flags); - return count; + + /* clear the pages coming from the pool if requested */ + if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) { + struct page *page; + + list_for_each_entry(page, pages, lru) { + if (PageHighMem(page)) + clear_highpage(page); + else + clear_page(page_address(page)); + } + } + + /* If pool didn't have enough pages allocate new one. 
*/ + if (count) { + gfp_t gfp_flags = pool->gfp_flags; + + /* set zero flag for page allocation if required */ + if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) + gfp_flags |= __GFP_ZERO; + + /* ttm_alloc_new_pages doesn't reference pool so we can run + * multiple requests in parallel. + **/ + r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, + count); + } + + return r; } /* Put all pages in pages list to correct pool to wait for reuse */ @@ -742,18 +771,18 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct list_head plist; struct page *p = NULL; - gfp_t gfp_flags = GFP_USER; unsigned count; int r; - /* set zero flag for page allocation if required */ - if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - /* No pool for cached pages */ if (pool == NULL) { + gfp_t gfp_flags = GFP_USER; unsigned i, j; + /* set zero flag for page allocation if required */ + if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) + gfp_flags |= __GFP_ZERO; + if (flags & TTM_PAGE_FLAG_DMA32) gfp_flags |= GFP_DMA32; else @@ -792,44 +821,21 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, return 0; } - /* combine zero flag to pool flags */ - gfp_flags |= pool->gfp_flags; - /* First we take pages from the pool */ INIT_LIST_HEAD(&plist); - npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); + r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); + count = 0; - list_for_each_entry(p, &plist, lru) { + list_for_each_entry(p, &plist, lru) pages[count++] = p; - } - - /* clear the pages coming from the pool if requested */ - if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { - list_for_each_entry(p, &plist, lru) { - if (PageHighMem(p)) - clear_highpage(p); - else - clear_page(page_address(p)); - } - } - /* If pool didn't have enough pages allocate new one. */ - if (npages > 0) { - /* ttm_alloc_new_pages doesn't reference pool so we can run - * multiple requests in parallel. - **/ - INIT_LIST_HEAD(&plist); - r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages); - list_for_each_entry(p, &plist, lru) { - pages[count++] = p; - } - if (r) { - /* If there is any pages in the list put them back to - * the pool. */ - pr_err("Failed to allocate extra pages for large request\n"); - ttm_put_pages(pages, count, flags, cstate); - return r; - } + if (r) { + /* If there is any pages in the list put them back to + * the pool. + */ + pr_err("Failed to allocate extra pages for large request\n"); + ttm_put_pages(pages, count, flags, cstate); + return r; } return 0; -- cgit v1.2.3 From 6ed4e2e673d348df6623012a628a8ab8624e3222 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Oct 2017 14:27:34 +0200 Subject: drm/ttm: add transparent huge page support for wc or uc allocations v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new huge page pool and try to allocate from it when it makes sense. 
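As a hedged aside on the allocation flags this pool uses (the flag values appear verbatim in the diff below; the rationale is inferred, not stated in the commit):

	/* GFP flags for the huge pools: __GFP_MOVABLE is cleared,
	 * presumably because the pages stay pinned behind long-lived GPU
	 * mappings, and __GFP_COMP is cleared to avoid compound pages
	 * (see the v2 note below). */
	#define TTM_HUGE_GFP (GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP))
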
v2: avoid compound pages for now Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 136 ++++++++++++++++++++++++++++------- 1 file changed, 109 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 39747326bf3e..b6f16e7ffff3 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -95,7 +95,7 @@ struct ttm_pool_opts { unsigned small; }; -#define NUM_POOLS 4 +#define NUM_POOLS 6 /** * struct ttm_pool_manager - Holds memory pools for fst allocation @@ -122,6 +122,8 @@ struct ttm_pool_manager { struct ttm_page_pool uc_pool; struct ttm_page_pool wc_pool_dma32; struct ttm_page_pool uc_pool_dma32; + struct ttm_page_pool wc_pool_huge; + struct ttm_page_pool uc_pool_huge; } ; }; }; @@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray) /** * Select the right pool or requested caching state and ttm flags. */ -static struct ttm_page_pool *ttm_get_pool(int flags, - enum ttm_caching_state cstate) +static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, + enum ttm_caching_state cstate) { int pool_index; @@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags, else pool_index = 0x1; - if (flags & TTM_PAGE_FLAG_DMA32) + if (flags & TTM_PAGE_FLAG_DMA32) { + if (huge) + return NULL; pool_index |= 0x2; + } else if (huge) { + pool_index |= 0x4; + } + return &_manager->pools[pool_index]; } @@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, * pages returned in pages array. */ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, - int ttm_flags, enum ttm_caching_state cstate, unsigned count) + int ttm_flags, enum ttm_caching_state cstate, + unsigned count, unsigned order) { struct page **caching_array; struct page *p; int r = 0; - unsigned i, cpages; + unsigned i, j, cpages; + unsigned npages = 1 << order; unsigned max_cpages = min(count, (unsigned)(PAGE_SIZE/sizeof(struct page *))); @@ -512,7 +522,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, } for (i = 0, cpages = 0; i < count; ++i) { - p = alloc_page(gfp_flags); + p = alloc_pages(gfp_flags, order); if (!p) { pr_err("Unable to get page %u\n", i); @@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, goto out; } + list_add(&p->lru, pages); + #ifdef CONFIG_HIGHMEM /* gfp flags of highmem page should never be dma32 so we * we should be fine in such case */ - if (!PageHighMem(p)) + if (PageHighMem(p)) + continue; + #endif - { - caching_array[cpages++] = p; + for (j = 0; j < npages; ++j) { + caching_array[cpages++] = p++; if (cpages == max_cpages) { r = ttm_set_pages_caching(caching_array, @@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, cpages = 0; } } - - list_add(&p->lru, pages); } if (cpages) { @@ -573,9 +585,9 @@ out: * Fill the given pool if there aren't enough pages and the requested number of * pages is small. 
*/ -static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, - int ttm_flags, enum ttm_caching_state cstate, unsigned count, - unsigned long *irq_flags) +static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, + enum ttm_caching_state cstate, + unsigned count, unsigned long *irq_flags) { struct page *p; int r; @@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, INIT_LIST_HEAD(&new_pages); r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, - cstate, alloc_size); + cstate, alloc_size, 0); spin_lock_irqsave(&pool->lock, *irq_flags); if (!r) { @@ -635,7 +647,7 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct list_head *pages, int ttm_flags, enum ttm_caching_state cstate, - unsigned count) + unsigned count, unsigned order) { unsigned long irq_flags; struct list_head *p; @@ -643,7 +655,9 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, int r = 0; spin_lock_irqsave(&pool->lock, irq_flags); - ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); + if (!order) + ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, + &irq_flags); if (count >= pool->npages) { /* take all pages from the pool */ @@ -698,7 +712,7 @@ out: * multiple requests in parallel. **/ r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, - count); + count, order); } return r; @@ -708,8 +722,9 @@ out: static void ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { + struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); + struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); unsigned long irq_flags; - struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); unsigned i; if (pool == NULL) { @@ -737,8 +752,48 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, return; } + i = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (huge) { + unsigned max_size, n2free; + + spin_lock_irqsave(&huge->lock, irq_flags); + while (i < npages) { + struct page *p = pages[i]; + unsigned j; + + if (!p) + break; + + for (j = 0; j < HPAGE_PMD_NR; ++j) + if (p++ != pages[i + j]) + break; + + if (j != HPAGE_PMD_NR) + break; + + list_add_tail(&pages[i]->lru, &huge->list); + + for (j = 0; j < HPAGE_PMD_NR; ++j) + pages[i++] = NULL; + huge->npages++; + } + + /* Check that we don't go over the pool limit */ + max_size = _manager->options.max_size; + max_size /= HPAGE_PMD_NR; + if (huge->npages > max_size) + n2free = huge->npages - max_size; + else + n2free = 0; + spin_unlock_irqrestore(&huge->lock, irq_flags); + if (n2free) + ttm_page_pool_free(huge, n2free, false); + } +#endif + spin_lock_irqsave(&pool->lock, irq_flags); - for (i = 0; i < npages; i++) { + while (i < npages) { if (pages[i]) { if (page_count(pages[i]) != 1) pr_err("Erroneous page count. 
Leaking pages.\n"); @@ -746,6 +801,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, pages[i] = NULL; pool->npages++; } + ++i; } /* Check that we don't go over the pool limit */ npages = 0; @@ -768,7 +824,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, static int ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { - struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); + struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); + struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); struct list_head plist; struct page *p = NULL; unsigned count; @@ -821,11 +878,28 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, return 0; } - /* First we take pages from the pool */ + count = 0; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (huge && npages >= HPAGE_PMD_NR) { + INIT_LIST_HEAD(&plist); + ttm_page_pool_get_pages(huge, &plist, flags, cstate, + npages / HPAGE_PMD_NR, + HPAGE_PMD_ORDER); + + list_for_each_entry(p, &plist, lru) { + unsigned j; + + for (j = 0; j < HPAGE_PMD_NR; ++j) + pages[count++] = &p[j]; + } + } +#endif + INIT_LIST_HEAD(&plist); - r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); + r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, + npages - count, 0); - count = 0; list_for_each_entry(p, &plist, lru) pages[count++] = p; @@ -872,6 +946,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) ttm_page_pool_init_locked(&_manager->uc_pool_dma32, GFP_USER | GFP_DMA32, "uc dma"); + ttm_page_pool_init_locked(&_manager->wc_pool_huge, + GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP), + "wc huge"); + + ttm_page_pool_init_locked(&_manager->uc_pool_huge, + GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP) + , "uc huge"); + _manager->options.max_size = max_pages; _manager->options.small = SMALL_ALLOCATION; _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; @@ -1041,12 +1123,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data) seq_printf(m, "No pool allocator running.\n"); return 0; } - seq_printf(m, "%6s %12s %13s %8s\n", + seq_printf(m, "%7s %12s %13s %8s\n", h[0], h[1], h[2], h[3]); for (i = 0; i < NUM_POOLS; ++i) { p = &_manager->pools[i]; - seq_printf(m, "%6s %12ld %13ld %8d\n", + seq_printf(m, "%7s %12ld %13ld %8d\n", p->name, p->nrefills, p->nfrees, p->npages); } -- cgit v1.2.3 From 7d0a42823ba5bdc11b4933749b147b8a6a6caae5 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 12 Oct 2017 07:25:08 -0400 Subject: drm/ttm: Fix unused variables with huge page support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index b6f16e7ffff3..95022473704b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -723,7 +723,9 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); +#endif unsigned long irq_flags; unsigned i; @@ -825,7 +827,9 @@ static int ttm_get_pages(struct page 
**pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); +#endif struct list_head plist; struct page *p = NULL; unsigned count; @@ -834,7 +838,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, /* No pool for cached pages */ if (pool == NULL) { gfp_t gfp_flags = GFP_USER; - unsigned i, j; + unsigned i; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + unsigned j; +#endif /* set zero flag for page allocation if required */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) -- cgit v1.2.3 From 5c42c64f7d54ba560b0b001e4e73e4a1aeed1355 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 12 Oct 2017 19:28:42 +0200 Subject: drm/ttm: fix the fix for huge compound pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't use compound pages at the moment. Take this into account when freeing them. Signed-off-by: Christian König Reviewed-and-Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 95022473704b..4d688c8d7853 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -733,22 +733,33 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, /* No pool for this memory type so free the pages */ i = 0; while (i < npages) { - unsigned order; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + struct page *p = pages[i]; +#endif + unsigned order = 0, j; if (!pages[i]) { ++i; continue; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + for (j = 0; j < HPAGE_PMD_NR; ++j) + if (p++ != pages[i + j]) + break; + + if (j == HPAGE_PMD_NR) + order = HPAGE_PMD_ORDER; +#endif + if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); - order = compound_order(pages[i]); __free_pages(pages[i], order); - order = 1 << order; - while (order) { + j = 1 << order; + while (j) { pages[i++] = NULL; - --order; + --j; } } return; -- cgit v1.2.3 From 767601d100a53e653233aebca7c262ce0addfa99 Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Fri, 3 Nov 2017 16:00:35 +0100 Subject: drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Memory allocation failure should generally be handled gracefully by callers. In particular, with transparent hugepage support, attempts to allocate huge pages can fail under memory pressure, but the callers fall back to allocating individual pages instead. In that case, there would be spurious [TTM] Unable to get page %u error messages in dmesg. 
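The fallback in question has this shape (a condensed sketch of the huge-page path added earlier in this series, with npages, pages, i and huge_flags taken from the surrounding function; not the verbatim code):

	/* Try 2 MiB chunks first; under memory pressure just fall
	 * through to the order-0 loop instead of reporting an error. */
	while (npages >= HPAGE_PMD_NR) {
		struct page *p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
		unsigned j;

		if (!p)
			break;	/* quiet fallback, hence pr_debug */

		for (j = 0; j < HPAGE_PMD_NR; ++j)
			pages[i++] = p++;
		npages -= HPAGE_PMD_NR;
	}
	/* remaining pages are allocated one at a time with alloc_page() */
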
Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 13 ++++++------- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 12 ++++++------ 2 files changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 4d688c8d7853..316f831ad5f0 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), GFP_KERNEL); if (!pages_to_free) { - pr_err("Failed to allocate memory for pool free operation\n"); + pr_debug("Failed to allocate memory for pool free operation\n"); return 0; } @@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); if (!caching_array) { - pr_err("Unable to allocate table for new pages\n"); + pr_debug("Unable to allocate table for new pages\n"); return -ENOMEM; } @@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, p = alloc_pages(gfp_flags, order); if (!p) { - pr_err("Unable to get page %u\n", i); + pr_debug("Unable to get page %u\n", i); /* store already allocated pages in the pool after * setting the caching state */ @@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, ++pool->nrefills; pool->npages += alloc_size; } else { - pr_err("Failed to fill pool (%p)\n", pool); + pr_debug("Failed to fill pool (%p)\n", pool); /* If we have any pages left put them to the pool. */ list_for_each_entry(p, &new_pages, lru) { ++cpages; @@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, while (npages) { p = alloc_page(gfp_flags); if (!p) { - - pr_err("Unable to allocate page\n"); + pr_debug("Unable to allocate page\n"); return -ENOMEM; } @@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, /* If there is any pages in the list put them back to * the pool. 
*/ - pr_err("Failed to allocate extra pages for large request\n"); + pr_debug("Failed to allocate extra pages for large request\n"); ttm_put_pages(pages, count, flags, cstate); return r; } diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 96ad12906621..6b2627fe9bc1 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -463,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, GFP_KERNEL); if (!pages_to_free) { - pr_err("%s: Failed to allocate memory for pool free operation\n", + pr_debug("%s: Failed to allocate memory for pool free operation\n", pool->dev_name); return 0; } @@ -755,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); if (!caching_array) { - pr_err("%s: Unable to allocate table for new pages\n", + pr_debug("%s: Unable to allocate table for new pages\n", pool->dev_name); return -ENOMEM; } @@ -768,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, for (i = 0, cpages = 0; i < count; ++i) { dma_p = __ttm_dma_alloc_page(pool); if (!dma_p) { - pr_err("%s: Unable to get page %u\n", - pool->dev_name, i); + pr_debug("%s: Unable to get page %u\n", + pool->dev_name, i); /* store already allocated pages in the pool after * setting the caching state */ @@ -855,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, struct dma_page *d_page; unsigned cpages = 0; - pr_err("%s: Failed to fill %s pool (r:%d)!\n", - pool->dev_name, pool->name, r); + pr_debug("%s: Failed to fill %s pool (r:%d)!\n", + pool->dev_name, pool->name, r); list_for_each_entry(d_page, &d_pages, page_list) { cpages++; -- cgit v1.2.3
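Taken together, the two helpers added at the start of this series let a driver's ttm_tt_populate()/ttm_tt_unpopulate() callbacks shrink to one-line wrappers. A hypothetical driver-side sketch (the mydrv_* names, including the mydrv_dev() device lookup, are invented for illustration; it assumes struct mydrv_ttm_tt embeds a struct ttm_dma_tt member named ttm; the real radeon/amdgpu callbacks also handle AGP and swiotlb special cases):

	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
	{
		struct mydrv_ttm_tt *gtt =
			container_of(ttm, struct mydrv_ttm_tt, ttm.ttm);

		return ttm_populate_and_map_pages(mydrv_dev(ttm->bdev), &gtt->ttm);
	}

	static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
	{
		struct mydrv_ttm_tt *gtt =
			container_of(ttm, struct mydrv_ttm_tt, ttm.ttm);

		ttm_unmap_and_unpopulate_pages(mydrv_dev(ttm->bdev), &gtt->ttm);
	}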