Skip to content

Commit 0180d6f

Browse files
committed
ttm/pool: make pool shrinker NUMA aware (v2)
This enables NUMA awareness for the shrinker on the ttm pools.
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
1 parent c21066e commit 0180d6f

1 file changed

Lines changed: 21 additions & 17 deletions

File tree

drivers/gpu/drm/ttm/ttm_pool.c

Lines changed: 21 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -423,26 +423,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
423423
return NULL;
424424
}
425425

426-
/* Free pages using the global shrinker list */
427-
static unsigned int ttm_pool_shrink(void)
426+
/* Free pages using the per-node shrinker list */
427+
static unsigned int ttm_pool_shrink(int nid, unsigned long num_to_free)
428428
{
429+
LIST_HEAD(dispose);
429430
struct ttm_pool_type *pt;
430431
unsigned int num_pages;
431-
struct page *p;
432432

433433
down_read(&pool_shrink_rwsem);
434434
spin_lock(&shrinker_lock);
435435
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
436436
list_move_tail(&pt->shrinker_list, &shrinker_list);
437437
spin_unlock(&shrinker_lock);
438438

439-
p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
440-
if (p) {
441-
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
442-
num_pages = 1 << pt->order;
443-
} else {
444-
num_pages = 0;
445-
}
439+
num_pages = list_lru_walk_node(&pt->pages, nid, pool_move_to_dispose_list, &dispose, &num_to_free);
440+
num_pages *= 1 << pt->order;
441+
442+
ttm_pool_dispose_list(pt, &dispose);
446443
up_read(&pool_shrink_rwsem);
447444

448445
return num_pages;
@@ -794,6 +791,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
794791
pt = ttm_pool_select_type(pool, page_caching, order);
795792
if (pt && allow_pools)
796793
p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
794+
797795
/*
798796
* If that fails or previously failed, allocate from system.
799797
* Note that this also disallows additional pool allocations using
@@ -944,8 +942,10 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
944942
{
945943
ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
946944

947-
while (atomic_long_read(&allocated_pages) > page_pool_size)
948-
ttm_pool_shrink();
945+
while (atomic_long_read(&allocated_pages) > page_pool_size) {
946+
unsigned long diff = atomic_long_read(&allocated_pages) - page_pool_size;
947+
ttm_pool_shrink(ttm_pool_nid(pool), diff);
948+
}
949949
}
950950
EXPORT_SYMBOL(ttm_pool_free);
951951

@@ -1200,7 +1200,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
12001200
unsigned long num_freed = 0;
12011201

12021202
do
1203-
num_freed += ttm_pool_shrink();
1203+
num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
12041204
while (num_freed < sc->nr_to_scan &&
12051205
atomic_long_read(&allocated_pages));
12061206

@@ -1328,11 +1328,15 @@ static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
13281328
.nr_to_scan = TTM_SHRINKER_BATCH,
13291329
};
13301330
unsigned long count;
1331+
int nid;
13311332

13321333
fs_reclaim_acquire(GFP_KERNEL);
1333-
count = ttm_pool_shrinker_count(mm_shrinker, &sc);
1334-
seq_printf(m, "%lu/%lu\n", count,
1335-
ttm_pool_shrinker_scan(mm_shrinker, &sc));
1334+
for_each_node(nid) {
1335+
sc.nid = nid;
1336+
count = ttm_pool_shrinker_count(mm_shrinker, &sc);
1337+
seq_printf(m, "%d: %lu/%lu\n", nid, count,
1338+
ttm_pool_shrinker_scan(mm_shrinker, &sc));
1339+
}
13361340
fs_reclaim_release(GFP_KERNEL);
13371341

13381342
return 0;
@@ -1380,7 +1384,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
13801384
#endif
13811385
#endif
13821386

1383-
mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
1387+
mm_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "drm-ttm_pool");
13841388
if (!mm_shrinker)
13851389
return -ENOMEM;
13861390

0 commit comments

Comments
 (0)