Skip to content

Commit 7df542a

Browse files
isilence authored and axboe committed
io_uring/zcrx: move count check into zcrx_get_free_niov
Instead of relying on the caller of __io_zcrx_get_free_niov() to check that there are free niovs available (i.e. free_count > 0), move the check into the function and return NULL if it can't allocate. It consolidates the free count checks, and it'll be easier to extend the niov free list allocator in the future.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://patch.msgid.link/6df04a6b3a6170f86d4345da9864f238311163f9.1774261953.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 898ad80 commit 7df542a

1 file changed

Lines changed: 21 additions & 17 deletions

File tree

io_uring/zcrx.c

Lines changed: 21 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -590,6 +590,19 @@ static void io_zcrx_return_niov_freelist(struct net_iov *niov)
590590
area->freelist[area->free_count++] = net_iov_idx(niov);
591591
}
592592

593+
static struct net_iov *zcrx_get_free_niov(struct io_zcrx_area *area)
594+
{
595+
unsigned niov_idx;
596+
597+
lockdep_assert_held(&area->freelist_lock);
598+
599+
if (unlikely(!area->free_count))
600+
return NULL;
601+
602+
niov_idx = area->freelist[--area->free_count];
603+
return &area->nia.niovs[niov_idx];
604+
}
605+
593606
static void io_zcrx_return_niov(struct net_iov *niov)
594607
{
595608
netmem_ref netmem = net_iov_to_netmem(niov);
@@ -903,16 +916,6 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
903916
return ret;
904917
}
905918

906-
static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
907-
{
908-
unsigned niov_idx;
909-
910-
lockdep_assert_held(&area->freelist_lock);
911-
912-
niov_idx = area->freelist[--area->free_count];
913-
return &area->nia.niovs[niov_idx];
914-
}
915-
916919
static inline bool is_zcrx_entry_marked(struct io_ring_ctx *ctx, unsigned long id)
917920
{
918921
return xa_get_mark(&ctx->zcrx_ctxs, id, XA_MARK_0);
@@ -1054,12 +1057,15 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
10541057

10551058
guard(spinlock_bh)(&area->freelist_lock);
10561059

1057-
while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
1058-
struct net_iov *niov = __io_zcrx_get_free_niov(area);
1059-
netmem_ref netmem = net_iov_to_netmem(niov);
1060+
while (pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
1061+
struct net_iov *niov = zcrx_get_free_niov(area);
1062+
netmem_ref netmem;
10601063

1064+
if (!niov)
1065+
break;
10611066
net_mp_niov_set_page_pool(pp, niov);
10621067
io_zcrx_sync_for_device(pp, niov);
1068+
netmem = net_iov_to_netmem(niov);
10631069
net_mp_netmem_place_in_cache(pp, netmem);
10641070
}
10651071
}
@@ -1284,10 +1290,8 @@ static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
12841290
if (area->mem.is_dmabuf)
12851291
return NULL;
12861292

1287-
scoped_guard(spinlock_bh, &area->freelist_lock) {
1288-
if (area->free_count)
1289-
niov = __io_zcrx_get_free_niov(area);
1290-
}
1293+
scoped_guard(spinlock_bh, &area->freelist_lock)
1294+
niov = zcrx_get_free_niov(area);
12911295

12921296
if (niov)
12931297
page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);

0 commit comments

Comments (0)