Skip to content

Commit c098913

Browse files
isilence authored and axboe committed
io_uring/zcrx: netmem array as refilling format
Instead of peeking into page pool allocation cache directly or via net_mp_netmem_place_in_cache(), pass a netmem array around. It's a better intermediate format, e.g. you can have it on stack and reuse the refilling code and decouples it from page pools a bit more. It still points into the page pool directly, there will be no additional copies. As the next step, we can change the callback prototype to take the netmem array from page pool. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://patch.msgid.link/9d8549adb7ef6672daf2d8a52858ce5926279a82.1774261953.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 48f253d commit c098913

1 file changed

Lines changed: 25 additions & 15 deletions

File tree

io_uring/zcrx.c

Lines changed: 25 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1011,19 +1011,21 @@ static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
10111011
return true;
10121012
}
10131013

1014-
static void io_zcrx_ring_refill(struct page_pool *pp,
1015-
struct io_zcrx_ifq *ifq)
1014+
static unsigned io_zcrx_ring_refill(struct page_pool *pp,
1015+
struct io_zcrx_ifq *ifq,
1016+
netmem_ref *netmems, unsigned to_alloc)
10161017
{
10171018
struct zcrx_rq *rq = &ifq->rq;
10181019
unsigned int mask = rq->nr_entries - 1;
10191020
unsigned int entries;
1021+
unsigned allocated = 0;
10201022

10211023
guard(spinlock_bh)(&rq->lock);
10221024

10231025
entries = zcrx_rq_entries(rq);
1024-
entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
1026+
entries = min_t(unsigned, entries, to_alloc);
10251027
if (unlikely(!entries))
1026-
return;
1028+
return 0;
10271029

10281030
do {
10291031
struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
@@ -1045,48 +1047,56 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
10451047
}
10461048

10471049
io_zcrx_sync_for_device(pp, niov);
1048-
net_mp_netmem_place_in_cache(pp, netmem);
1050+
netmems[allocated] = netmem;
1051+
allocated++;
10491052
} while (--entries);
10501053

10511054
smp_store_release(&rq->ring->head, rq->cached_head);
1055+
return allocated;
10521056
}
10531057

1054-
static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
1058+
static unsigned io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq,
1059+
netmem_ref *netmems, unsigned to_alloc)
10551060
{
10561061
struct io_zcrx_area *area = ifq->area;
1062+
unsigned allocated = 0;
10571063

10581064
guard(spinlock_bh)(&area->freelist_lock);
10591065

1060-
while (pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
1066+
for (allocated = 0; allocated < to_alloc; allocated++) {
10611067
struct net_iov *niov = zcrx_get_free_niov(area);
1062-
netmem_ref netmem;
10631068

10641069
if (!niov)
10651070
break;
10661071
net_mp_niov_set_page_pool(pp, niov);
10671072
io_zcrx_sync_for_device(pp, niov);
1068-
netmem = net_iov_to_netmem(niov);
1069-
net_mp_netmem_place_in_cache(pp, netmem);
1073+
netmems[allocated] = net_iov_to_netmem(niov);
10701074
}
1075+
return allocated;
10711076
}
10721077

10731078
static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
10741079
{
10751080
struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
1081+
netmem_ref *netmems = pp->alloc.cache;
1082+
unsigned to_alloc = PP_ALLOC_CACHE_REFILL;
1083+
unsigned allocated;
10761084

10771085
/* pp should already be ensuring that */
10781086
if (WARN_ON_ONCE(pp->alloc.count))
10791087
return 0;
10801088

1081-
io_zcrx_ring_refill(pp, ifq);
1082-
if (likely(pp->alloc.count))
1089+
allocated = io_zcrx_ring_refill(pp, ifq, netmems, to_alloc);
1090+
if (likely(allocated))
10831091
goto out_return;
10841092

1085-
io_zcrx_refill_slow(pp, ifq);
1086-
if (!pp->alloc.count)
1093+
allocated = io_zcrx_refill_slow(pp, ifq, netmems, to_alloc);
1094+
if (!allocated)
10871095
return 0;
10881096
out_return:
1089-
return pp->alloc.cache[--pp->alloc.count];
1097+
allocated--;
1098+
pp->alloc.count += allocated;
1099+
return netmems[allocated];
10901100
}
10911101

10921102
static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)

0 commit comments

Comments
 (0)