@@ -389,17 +389,17 @@ static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
 		return ret;
 
 	ptr = io_region_get_ptr(&ifq->rq_region);
-	ifq->rq_ring = (struct io_uring *)ptr;
-	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
+	ifq->rq.ring = (struct io_uring *)ptr;
+	ifq->rq.rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
 
 	return 0;
 }
 
 static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
 {
 	io_free_region(ifq->user, &ifq->rq_region);
-	ifq->rq_ring = NULL;
-	ifq->rqes = NULL;
+	ifq->rq.ring = NULL;
+	ifq->rq.rqes = NULL;
 }
 
 static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
@@ -519,7 +519,7 @@ static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
 		return NULL;
 
 	ifq->if_rxq = -1;
-	spin_lock_init(&ifq->rq_lock);
+	spin_lock_init(&ifq->rq.lock);
 	mutex_init(&ifq->pp_lock);
 	refcount_set(&ifq->refs, 1);
 	refcount_set(&ifq->user_refs, 1);
@@ -855,7 +855,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		mmgrab(ctx->mm_account);
 		ifq->mm_account = ctx->mm_account;
 	}
-	ifq->rq_entries = reg.rq_entries;
+	ifq->rq.nr_entries = reg.rq_entries;
 
 	scoped_guard(mutex, &ctx->mmap_lock) {
 		/* preallocate id */
@@ -971,20 +971,19 @@ void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
 	xa_destroy(&ctx->zcrx_ctxs);
 }
 
-static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
+static inline u32 zcrx_rq_entries(struct zcrx_rq *rq)
 {
 	u32 entries;
 
-	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
-	return min(entries, ifq->rq_entries);
+	entries = smp_load_acquire(&rq->ring->tail) - rq->cached_head;
+	return min(entries, rq->nr_entries);
 }
 
-static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
-						 unsigned mask)
+static struct io_uring_zcrx_rqe *zcrx_next_rqe(struct zcrx_rq *rq, unsigned mask)
 {
-	unsigned int idx = ifq->cached_rq_head++ & mask;
+	unsigned int idx = rq->cached_head++ & mask;
 
-	return &ifq->rqes[idx];
+	return &rq->rqes[idx];
 }
 
 static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
@@ -1013,18 +1012,19 @@ static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
 static void io_zcrx_ring_refill(struct page_pool *pp,
 				struct io_zcrx_ifq *ifq)
 {
-	unsigned int mask = ifq->rq_entries - 1;
+	struct zcrx_rq *rq = &ifq->rq;
+	unsigned int mask = rq->nr_entries - 1;
 	unsigned int entries;
 
-	guard(spinlock_bh)(&ifq->rq_lock);
+	guard(spinlock_bh)(&rq->lock);
 
-	entries = io_zcrx_rqring_entries(ifq);
+	entries = zcrx_rq_entries(rq);
 	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
 	if (unlikely(!entries))
 		return;
 
 	do {
-		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
+		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
 		struct net_iov *niov;
 		netmem_ref netmem;
 
@@ -1046,7 +1046,7 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 		net_mp_netmem_place_in_cache(pp, netmem);
 	} while (--entries);
 
-	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
+	smp_store_release(&rq->ring->head, rq->cached_head);
 }
 
 static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
@@ -1159,22 +1159,22 @@ static const struct memory_provider_ops io_uring_pp_zc_ops = {
 };
 
 static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
-			      struct io_zcrx_ifq *zcrx)
+			      struct io_zcrx_ifq *zcrx, struct zcrx_rq *rq)
 {
-	unsigned int mask = zcrx->rq_entries - 1;
+	unsigned int mask = rq->nr_entries - 1;
 	unsigned int i;
 
-	nr = min(nr, io_zcrx_rqring_entries(zcrx));
+	nr = min(nr, zcrx_rq_entries(rq));
 	for (i = 0; i < nr; i++) {
-		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
+		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
 		struct net_iov *niov;
 
 		if (!io_parse_rqe(rqe, zcrx, &niov))
 			break;
 		netmem_array[i] = net_iov_to_netmem(niov);
 	}
 
-	smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
+	smp_store_release(&rq->ring->head, rq->cached_head);
 	return i;
 }
 
@@ -1208,8 +1208,10 @@ static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
 		return -EINVAL;
 
 	do {
-		scoped_guard(spinlock_bh, &zcrx->rq_lock) {
-			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
+		struct zcrx_rq *rq = &zcrx->rq;
+
+		scoped_guard(spinlock_bh, &rq->lock) {
+			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx, rq);
 			zcrx_return_buffers(netmems, nr);
 		}
 
@@ -1218,7 +1220,7 @@ static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
 		if (fatal_signal_pending(current))
 			break;
 		cond_resched();
-	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);
+	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq.nr_entries);
 
 	return 0;
 }
0 commit comments