Skip to content

Commit 444e2a1

Browse files
committed
ttm/pool: port to list_lru. (v2)
This is an initial port of the TTM pools for write combined and uncached pages to use the list_lru. This makes the pools more NUMA aware and avoids needing separate NUMA pools (later commit enables this). Cc: Christian Koenig <christian.koenig@amd.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Dave Chinner <david@fromorbit.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
1 parent ae80122 commit 444e2a1

5 files changed

Lines changed: 83 additions & 49 deletions

File tree

drivers/gpu/drm/ttm/tests/ttm_device_test.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ static void ttm_device_init_pools(struct kunit *test)
176176

177177
if (ttm_pool_uses_dma_alloc(pool))
178178
KUNIT_ASSERT_FALSE(test,
179-
list_empty(&pt.pages));
179+
!list_lru_count(&pt.pages));
180180
}
181181
}
182182
}

drivers/gpu/drm/ttm/tests/ttm_pool_test.c

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -248,15 +248,15 @@ static void ttm_pool_alloc_order_caching_match(struct kunit *test)
248248
pool = ttm_pool_pre_populated(test, size, caching);
249249

250250
pt = &pool->caching[caching].orders[order];
251-
KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
251+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
252252

253253
tt = ttm_tt_kunit_init(test, 0, caching, size);
254254
KUNIT_ASSERT_NOT_NULL(test, tt);
255255

256256
err = ttm_pool_alloc(pool, tt, &simple_ctx);
257257
KUNIT_ASSERT_EQ(test, err, 0);
258258

259-
KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
259+
KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
260260

261261
ttm_pool_free(pool, tt);
262262
ttm_tt_fini(tt);
@@ -282,17 +282,17 @@ static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
282282
tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
283283
KUNIT_ASSERT_NOT_NULL(test, tt);
284284

285-
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
286-
KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
285+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
286+
KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
287287

288288
err = ttm_pool_alloc(pool, tt, &simple_ctx);
289289
KUNIT_ASSERT_EQ(test, err, 0);
290290

291291
ttm_pool_free(pool, tt);
292292
ttm_tt_fini(tt);
293293

294-
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
295-
KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
294+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
295+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
296296

297297
ttm_pool_fini(pool);
298298
}
@@ -316,17 +316,17 @@ static void ttm_pool_alloc_order_mismatch(struct kunit *test)
316316
tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
317317
KUNIT_ASSERT_NOT_NULL(test, tt);
318318

319-
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
320-
KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
319+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
320+
KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
321321

322322
err = ttm_pool_alloc(pool, tt, &simple_ctx);
323323
KUNIT_ASSERT_EQ(test, err, 0);
324324

325325
ttm_pool_free(pool, tt);
326326
ttm_tt_fini(tt);
327327

328-
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
329-
KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
328+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
329+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
330330

331331
ttm_pool_fini(pool);
332332
}
@@ -352,12 +352,12 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
352352
ttm_pool_alloc(pool, tt, &simple_ctx);
353353

354354
pt = &pool->caching[caching].orders[order];
355-
KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
355+
KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
356356

357357
ttm_pool_free(pool, tt);
358358
ttm_tt_fini(tt);
359359

360-
KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
360+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
361361

362362
ttm_pool_fini(pool);
363363
}
@@ -383,12 +383,12 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
383383
ttm_pool_alloc(pool, tt, &simple_ctx);
384384

385385
pt = &pool->caching[caching].orders[order];
386-
KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
386+
KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
387387

388388
ttm_pool_free(pool, tt);
389389
ttm_tt_fini(tt);
390390

391-
KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
391+
KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
392392

393393
ttm_pool_fini(pool);
394394
}
@@ -404,11 +404,11 @@ static void ttm_pool_fini_basic(struct kunit *test)
404404
pool = ttm_pool_pre_populated(test, size, caching);
405405
pt = &pool->caching[caching].orders[order];
406406

407-
KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
407+
KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
408408

409409
ttm_pool_fini(pool);
410410

411-
KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
411+
KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
412412
}
413413

414414
static struct kunit_case ttm_pool_test_cases[] = {

drivers/gpu/drm/ttm/ttm_pool.c

Lines changed: 62 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,16 @@ static struct list_head shrinker_list;
132132
static struct shrinker *mm_shrinker;
133133
static DECLARE_RWSEM(pool_shrink_rwsem);
134134

135+
static int ttm_pool_nid(struct ttm_pool *pool)
136+
{
137+
int nid = NUMA_NO_NODE;
138+
if (pool)
139+
nid = pool->nid;
140+
if (nid == NUMA_NO_NODE)
141+
nid = numa_node_id();
142+
return nid;
143+
}
144+
135145
/* Allocate pages of size 1 << order with the given gfp_flags */
136146
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
137147
unsigned int order)
@@ -297,30 +307,41 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
297307
clear_page(page_address(p + i));
298308
}
299309

300-
spin_lock(&pt->lock);
301-
list_add(&p->lru, &pt->pages);
302-
spin_unlock(&pt->lock);
310+
INIT_LIST_HEAD(&p->lru);
311+
rcu_read_lock();
312+
list_lru_add(&pt->pages, &p->lru, page_to_nid(p), NULL);
313+
rcu_read_unlock();
303314
atomic_long_add(1 << pt->order, &allocated_pages);
304315

305316
mod_lruvec_page_state(p, NR_GPU_ACTIVE, -num_pages);
306317
mod_lruvec_page_state(p, NR_GPU_RECLAIM, num_pages);
307318
}
308319

320+
static enum lru_status take_one_from_lru(struct list_head *item,
321+
struct list_lru_one *list,
322+
void *cb_arg)
323+
{
324+
struct page **out_page = cb_arg;
325+
struct page *p = container_of(item, struct page, lru);
326+
list_lru_isolate(list, item);
327+
328+
*out_page = p;
329+
return LRU_REMOVED;
330+
}
331+
309332
/* Take pages from a specific pool_type, return NULL when nothing available */
310-
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
333+
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
311334
{
312-
struct page *p;
335+
int ret;
336+
struct page *p = NULL;
337+
unsigned long nr_to_walk = 1;
313338

314-
spin_lock(&pt->lock);
315-
p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
316-
if (p) {
339+
ret = list_lru_walk_node(&pt->pages, nid, take_one_from_lru, (void *)&p, &nr_to_walk);
340+
if (ret == 1 && p) {
317341
atomic_long_sub(1 << pt->order, &allocated_pages);
318342
mod_lruvec_page_state(p, NR_GPU_ACTIVE, (1 << pt->order));
319343
mod_lruvec_page_state(p, NR_GPU_RECLAIM, -(1 << pt->order));
320-
list_del(&p->lru);
321344
}
322-
spin_unlock(&pt->lock);
323-
324345
return p;
325346
}
326347

@@ -331,25 +352,47 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
331352
pt->pool = pool;
332353
pt->caching = caching;
333354
pt->order = order;
334-
spin_lock_init(&pt->lock);
335-
INIT_LIST_HEAD(&pt->pages);
355+
list_lru_init(&pt->pages);
336356

337357
spin_lock(&shrinker_lock);
338358
list_add_tail(&pt->shrinker_list, &shrinker_list);
339359
spin_unlock(&shrinker_lock);
340360
}
341361

362+
static enum lru_status pool_move_to_dispose_list(struct list_head *item,
363+
struct list_lru_one *list,
364+
void *cb_arg)
365+
{
366+
struct list_head *dispose = cb_arg;
367+
368+
list_lru_isolate_move(list, item, dispose);
369+
370+
return LRU_REMOVED;
371+
}
372+
373+
static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
374+
struct list_head *dispose)
375+
{
376+
while (!list_empty(dispose)) {
377+
struct page *p;
378+
p = list_first_entry(dispose, struct page, lru);
379+
list_del_init(&p->lru);
380+
atomic_long_sub(1 << pt->order, &allocated_pages);
381+
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
382+
}
383+
}
384+
342385
/* Remove a pool_type from the global shrinker list and free all pages */
343386
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
344387
{
345-
struct page *p;
388+
LIST_HEAD(dispose);
346389

347390
spin_lock(&shrinker_lock);
348391
list_del(&pt->shrinker_list);
349392
spin_unlock(&shrinker_lock);
350393

351-
while ((p = ttm_pool_type_take(pt)))
352-
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
394+
list_lru_walk(&pt->pages, pool_move_to_dispose_list, &dispose, LONG_MAX);
395+
ttm_pool_dispose_list(pt, &dispose);
353396
}
354397

355398
/* Return the pool_type to use for the given caching and order */
@@ -399,7 +442,7 @@ static unsigned int ttm_pool_shrink(void)
399442
list_move_tail(&pt->shrinker_list, &shrinker_list);
400443
spin_unlock(&shrinker_lock);
401444

402-
p = ttm_pool_type_take(pt);
445+
p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
403446
if (p) {
404447
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
405448
num_pages = 1 << pt->order;
@@ -756,7 +799,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
756799
p = NULL;
757800
pt = ttm_pool_select_type(pool, page_caching, order);
758801
if (pt && allow_pools)
759-
p = ttm_pool_type_take(pt);
802+
p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
760803
/*
761804
* If that fails or previously failed, allocate from system.
762805
* Note that this also disallows additional pool allocations using
@@ -1185,16 +1228,7 @@ static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
11851228
/* Count the number of pages available in a pool_type */
11861229
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
11871230
{
1188-
unsigned int count = 0;
1189-
struct page *p;
1190-
1191-
spin_lock(&pt->lock);
1192-
/* Only used for debugfs, the overhead doesn't matter */
1193-
list_for_each_entry(p, &pt->pages, lru)
1194-
++count;
1195-
spin_unlock(&pt->lock);
1196-
1197-
return count;
1231+
return list_lru_count(&pt->pages);
11981232
}
11991233

12001234
/* Print a nice header for the order */

include/drm/ttm/ttm_pool.h

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
#include <linux/mmzone.h>
3030
#include <linux/llist.h>
3131
#include <linux/spinlock.h>
32+
#include <linux/list_lru.h>
3233
#include <drm/ttm/ttm_caching.h>
3334

3435
struct device;
@@ -45,8 +46,7 @@ struct ttm_tt;
4546
* @order: the allocation order our pages have
4647
* @caching: the caching type our pages have
4748
* @shrinker_list: our place on the global shrinker list
48-
* @lock: protection of the page list
49-
* @pages: the list of pages in the pool
49+
* @pages: the lru_list of pages in the pool
5050
*/
5151
struct ttm_pool_type {
5252
struct ttm_pool *pool;
@@ -55,8 +55,7 @@ struct ttm_pool_type {
5555

5656
struct list_head shrinker_list;
5757

58-
spinlock_t lock;
59-
struct list_head pages;
58+
struct list_lru pages;
6059
};
6160

6261
/**

mm/list_lru.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -179,6 +179,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
179179
unlock_list_lru(l, false);
180180
return false;
181181
}
182+
EXPORT_SYMBOL_GPL(list_lru_add);
182183

183184
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
184185
{

0 commit comments

Comments
 (0)