Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_pool.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 111
1 file changed, 70 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index aa116a7bbae3..18c342a919a2 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -47,6 +47,11 @@
#include "ttm_module.h"
+#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
+/* Some architectures have a weird PMD_SHIFT that can exceed MAX_ORDER */
+#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)
+
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
*
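
For context on the two new macros: TTM_MAX_ORDER caps pool allocations at a single PMD-sized chunk (one huge page), and TTM_DIM_ORDER is the number of slots needed in the per-order pool arrays (orders 0 through TTM_MAX_ORDER inclusive), clamped for architectures whose PMD_SHIFT would push past MAX_ORDER. A minimal user-space sketch of the arithmetic, assuming typical x86-64 values (PAGE_SHIFT = 12, PMD_SHIFT = 21); the constants are illustrative stand-ins, not taken from the patch:

#include <stdio.h>

/* Illustrative stand-ins for a common x86-64 configuration with 4K
 * pages; MAX_ORDER is the value used by kernels of this era. */
#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define MAX_ORDER  11

#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)

int main(void)
{
	/* 21 - 12 = 9: one PMD covers 2^9 = 512 base pages (2 MiB). */
	printf("TTM_MAX_ORDER = %d\n", TTM_MAX_ORDER);
	/* 10 pool slots, one per order 0..9. */
	printf("TTM_DIM_ORDER = %d\n", TTM_DIM_ORDER);
	return 0;
}

On such a configuration the pool arrays shrink from 11 entries to 10, and the top allocation order drops from 10 to 9, matching the largest chunk the pool ever hands out.
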
@@ -65,11 +70,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_uncached[MAX_ORDER];
+static struct ttm_pool_type global_write_combined[TTM_DIM_ORDER];
+static struct ttm_pool_type global_uncached[TTM_DIM_ORDER];
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[TTM_DIM_ORDER];
+static struct ttm_pool_type global_dma32_uncached[TTM_DIM_ORDER];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -368,6 +373,43 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
}
/**
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: Index of the first page to free.
+ * @end_page: Index one past the last page to free.
+ *
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if the allocation hits an error before it
+ * can completely fulfill the request. This function can be used to free
+ * these individual ranges.
+ */
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ enum ttm_caching caching,
+ pgoff_t start_page, pgoff_t end_page)
+{
+ struct page **pages = tt->pages;
+ unsigned int order;
+ pgoff_t i, nr;
+
+ for (i = start_page; i < end_page; i += nr, pages += nr) {
+ struct ttm_pool_type *pt = NULL;
+
+ order = ttm_pool_page_order(pool, *pages);
+ nr = (1UL << order);
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i], nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ if (pt)
+ ttm_pool_type_give(pt, *pages);
+ else
+ ttm_pool_free_page(pool, caching, order, *pages);
+ }
+}
+
+/**
* ttm_pool_alloc - Fill a ttm_tt object
*
* @pool: ttm_pool to use
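
ttm_pool_free_range() walks the page vector in variable-length strides: each compound page records the order it was allocated with, and the loop advances i and pages by a whole 1 << order chunk per iteration. A standalone sketch of that stride pattern, with a hypothetical page_order() standing in for ttm_pool_page_order():

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for ttm_pool_page_order(): pretend the vector
 * holds one order-3 compound allocation followed by order-0 pages. */
static unsigned int page_order(size_t i)
{
	return i == 0 ? 3 : 0;
}

int main(void)
{
	size_t start_page = 0, end_page = 10, i, nr;

	/* Same stride pattern as ttm_pool_free_range(): every iteration
	 * consumes an entire 1 << order chunk of the page vector. */
	for (i = start_page; i < end_page; i += nr) {
		nr = (size_t)1 << page_order(i);
		printf("free %zu page(s) at index %zu\n", nr, i);
	}
	return 0;
}
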
@@ -382,12 +424,14 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
- unsigned long num_pages = tt->num_pages;
+ pgoff_t num_pages = tt->num_pages;
dma_addr_t *dma_addr = tt->dma_address;
struct page **caching = tt->pages;
struct page **pages = tt->pages;
+ enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
- unsigned int i, order;
+ pgoff_t caching_divide;
+ unsigned int order;
struct page *p;
int r;
@@ -405,11 +449,12 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ for (order = min_t(unsigned int, TTM_MAX_ORDER, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
struct ttm_pool_type *pt;
+ page_caching = tt->caching;
pt = ttm_pool_select_type(pool, tt->caching, order);
p = pt ? ttm_pool_type_take(pt) : NULL;
if (p) {
@@ -418,6 +463,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
if (r)
goto error_free_page;
+ caching = pages;
do {
r = ttm_pool_page_allocated(pool, order, p,
&dma_addr,
@@ -426,14 +472,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
if (r)
goto error_free_page;
+ caching = pages;
if (num_pages < (1 << order))
break;
p = ttm_pool_type_take(pt);
} while (p);
- caching = pages;
}
+ page_caching = ttm_cached;
while (num_pages >= (1 << order) &&
(p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
@@ -442,6 +489,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
tt->caching);
if (r)
goto error_free_page;
+ caching = pages;
}
r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
&num_pages, &pages);
@@ -468,15 +516,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
return 0;
error_free_page:
- ttm_pool_free_page(pool, tt->caching, order, p);
+ ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
num_pages = tt->num_pages - num_pages;
- for (i = 0; i < num_pages; ) {
- order = ttm_pool_page_order(pool, tt->pages[i]);
- ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
- i += 1 << order;
- }
+ caching_divide = caching - tt->pages;
+ ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
return r;
}
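
This error-path rework is the functional core of the patch. The caching = pages; assignments added above keep caching pointing one past the last page whose caching transition has completed, so when an allocation step fails the page vector splits cleanly at caching_divide: everything below it already carries the requested caching attribute, everything above it is still write-back cached, and each half is freed with the attribute it actually has (the old code freed everything as tt->caching). A small user-space sketch of the pointer-to-index arithmetic, with made-up loop values:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	void *vec[8];             /* stand-in for tt->pages */
	void **pages = vec;       /* allocation cursor */
	void **caching = vec;     /* caching-transition boundary */
	ptrdiff_t caching_divide;
	int i;

	for (i = 0; i < 8; i++) {
		pages++;          /* a page was taken or allocated... */
		if (i == 4)
			break;    /* ...but this step hit an error */
		caching = pages;  /* transition succeeded up to here */
	}

	caching_divide = caching - vec;
	/* [0, caching_divide) has the requested caching attribute,
	 * [caching_divide, pages - vec) is still write-back cached. */
	printf("requested caching: [0, %td)\n", caching_divide);
	printf("still ttm_cached:  [%td, %td)\n",
	       caching_divide, pages - vec);
	return 0;
}
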
@@ -492,27 +538,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
*/
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
- unsigned int i;
-
- for (i = 0; i < tt->num_pages; ) {
- struct page *p = tt->pages[i];
- unsigned int order, num_pages;
- struct ttm_pool_type *pt;
-
- order = ttm_pool_page_order(pool, p);
- num_pages = 1ULL << order;
- if (tt->dma_address)
- ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
-
- pt = ttm_pool_select_type(pool, tt->caching, order);
- if (pt)
- ttm_pool_type_give(pt, tt->pages[i]);
- else
- ttm_pool_free_page(pool, tt->caching, order,
- tt->pages[i]);
-
- i += num_pages;
- }
+ ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
while (atomic_long_read(&allocated_pages) > page_pool_size)
ttm_pool_shrink();
@@ -542,7 +568,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
if (use_dma_alloc) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < MAX_ORDER; ++j)
+ for (j = 0; j < TTM_DIM_ORDER; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
@@ -562,7 +588,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
if (pool->use_dma_alloc) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < MAX_ORDER; ++j)
+ for (j = 0; j < TTM_DIM_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
@@ -616,7 +642,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
- for (i = 0; i < MAX_ORDER; ++i)
+ for (i = 0; i < TTM_DIM_ORDER; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -627,7 +653,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
- for (i = 0; i < MAX_ORDER; ++i)
+ for (i = 0; i < TTM_DIM_ORDER; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -730,13 +756,16 @@ int ttm_pool_mgr_init(unsigned long num_pages)
{
unsigned int i;
+ BUILD_BUG_ON(TTM_DIM_ORDER > MAX_ORDER);
+ BUILD_BUG_ON(TTM_DIM_ORDER < 1);
+
if (!page_pool_size)
page_pool_size = num_pages;
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
- for (i = 0; i < MAX_ORDER; ++i) {
+ for (i = 0; i < TTM_DIM_ORDER; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
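
The two BUILD_BUG_ON() checks make a pathological PMD_SHIFT/MAX_ORDER combination fail at compile time rather than sizing the pool arrays out of bounds at runtime. A user-space analogue using C11 _Static_assert in place of the kernel's BUILD_BUG_ON, with the same illustrative constants as the earlier sketch:

/* Same illustrative constants as above; assumptions, not patch values. */
#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define MAX_ORDER  11

#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)

/* BUILD_BUG_ON(x) fails the build when x is true; the conditions are
 * inverted for _Static_assert, which fails when its condition is false. */
_Static_assert(TTM_DIM_ORDER <= MAX_ORDER, "pool arrays would exceed MAX_ORDER");
_Static_assert(TTM_DIM_ORDER >= 1, "need at least the order-0 pool");

int main(void) { return 0; }
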
@@ -769,7 +798,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
- for (i = 0; i < MAX_ORDER; ++i) {
+ for (i = 0; i < TTM_DIM_ORDER; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);