Skip to content

Commit f57efb3

Browse files
committed
eth: bnxt: store rx buffer size per queue
Instead of using a constant buffer length, allow configuring the size for
each queue separately. There is no way to change the length yet; it will be
passed in from memory providers in a later patch.

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
1 parent c0b709b commit f57efb3

4 files changed

Lines changed: 38 additions & 27 deletions

File tree

drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 33 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -905,7 +905,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
905905

906906
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
907907
{
908-
return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
908+
return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
909909
}
910910

911911
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -915,9 +915,9 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
915915
{
916916
struct page *page;
917917

918-
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
918+
if (rxr->rx_page_size < PAGE_SIZE) {
919919
page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
920-
BNXT_RX_PAGE_SIZE);
920+
rxr->rx_page_size);
921921
} else {
922922
page = page_pool_dev_alloc_pages(rxr->page_pool);
923923
*offset = 0;
@@ -936,8 +936,9 @@ static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
936936
{
937937
netmem_ref netmem;
938938

939-
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
940-
netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
939+
if (rxr->rx_page_size < PAGE_SIZE) {
940+
netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
941+
rxr->rx_page_size, gfp);
941942
} else {
942943
netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
943944
*offset = 0;
@@ -1155,9 +1156,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
11551156
return NULL;
11561157
}
11571158
dma_addr -= bp->rx_dma_offset;
1158-
dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1159+
dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
11591160
bp->rx_dir);
1160-
skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1161+
skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
11611162
if (!skb) {
11621163
page_pool_recycle_direct(rxr->page_pool, page);
11631164
return NULL;
@@ -1189,7 +1190,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
11891190
return NULL;
11901191
}
11911192
dma_addr -= bp->rx_dma_offset;
1192-
dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1193+
dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
11931194
bp->rx_dir);
11941195

11951196
if (unlikely(!payload))
@@ -1203,7 +1204,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
12031204

12041205
skb_mark_for_recycle(skb);
12051206
off = (void *)data_ptr - page_address(page);
1206-
skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1207+
skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
12071208
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
12081209
payload + NET_IP_ALIGN);
12091210

@@ -1288,7 +1289,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
12881289
if (skb) {
12891290
skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
12901291
cons_rx_buf->offset,
1291-
frag_len, BNXT_RX_PAGE_SIZE);
1292+
frag_len, rxr->rx_page_size);
12921293
} else {
12931294
skb_frag_t *frag = &shinfo->frags[i];
12941295

@@ -1313,7 +1314,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
13131314
if (skb) {
13141315
skb->len -= frag_len;
13151316
skb->data_len -= frag_len;
1316-
skb->truesize -= BNXT_RX_PAGE_SIZE;
1317+
skb->truesize -= rxr->rx_page_size;
13171318
}
13181319

13191320
--shinfo->nr_frags;
@@ -1328,7 +1329,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
13281329
}
13291330

13301331
page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1331-
BNXT_RX_PAGE_SIZE);
1332+
rxr->rx_page_size);
13321333

13331334
total_frag_len += frag_len;
13341335
prod = NEXT_RX_AGG(prod);
@@ -2290,8 +2291,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
22902291
if (!skb)
22912292
goto oom_next_rx;
22922293
} else {
2293-
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2294-
rxr->page_pool, &xdp);
2294+
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
22952295
if (!skb) {
22962296
/* we should be able to free the old skb here */
22972297
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3837,11 +3837,13 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
38373837
pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
38383838
if (BNXT_RX_PAGE_MODE(bp))
38393839
pp.pool_size += bp->rx_ring_size / rx_size_fac;
3840+
3841+
pp.order = get_order(rxr->rx_page_size);
38403842
pp.nid = numa_node;
38413843
pp.netdev = bp->dev;
38423844
pp.dev = &bp->pdev->dev;
38433845
pp.dma_dir = bp->rx_dir;
3844-
pp.max_len = PAGE_SIZE;
3846+
pp.max_len = PAGE_SIZE << pp.order;
38453847
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
38463848
PP_FLAG_ALLOW_UNREADABLE_NETMEM;
38473849
pp.queue_idx = rxr->bnapi->index;
@@ -3852,7 +3854,10 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
38523854
rxr->page_pool = pool;
38533855

38543856
rxr->need_head_pool = page_pool_is_unreadable(pool);
3857+
rxr->need_head_pool |= !!pp.order;
38553858
if (bnxt_separate_head_pool(rxr)) {
3859+
pp.order = 0;
3860+
pp.max_len = PAGE_SIZE;
38563861
pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
38573862
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
38583863
pool = page_pool_create(&pp);
@@ -4328,6 +4333,8 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
43284333
if (!rxr)
43294334
goto skip_rx;
43304335

4336+
rxr->rx_page_size = BNXT_RX_PAGE_SIZE;
4337+
43314338
ring = &rxr->rx_ring_struct;
43324339
rmem = &ring->ring_mem;
43334340
rmem->nr_pages = bp->rx_nr_pages;
@@ -4487,7 +4494,7 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
44874494
ring = &rxr->rx_agg_ring_struct;
44884495
ring->fw_ring_id = INVALID_HW_RING_ID;
44894496
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4490-
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4497+
type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
44914498
RX_BD_TYPE_RX_AGG_BD;
44924499

44934500
/* On P7, setting EOP will cause the chip to disable
@@ -7065,6 +7072,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
70657072

70667073
static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
70677074
struct hwrm_ring_alloc_input *req,
7075+
struct bnxt_rx_ring_info *rxr,
70687076
struct bnxt_ring_struct *ring)
70697077
{
70707078
struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
@@ -7074,7 +7082,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
70747082
if (ring_type == HWRM_RING_ALLOC_AGG) {
70757083
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
70767084
req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7077-
req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7085+
req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
70787086
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
70797087
} else {
70807088
req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
@@ -7088,6 +7096,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
70887096
}
70897097

70907098
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7099+
struct bnxt_rx_ring_info *rxr,
70917100
struct bnxt_ring_struct *ring,
70927101
u32 ring_type, u32 map_index)
70937102
{
@@ -7144,7 +7153,8 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
71447153
cpu_to_le32(bp->rx_ring_mask + 1) :
71457154
cpu_to_le32(bp->rx_agg_ring_mask + 1);
71467155
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7147-
bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
7156+
bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7157+
rxr, ring);
71487158
break;
71497159
case HWRM_RING_ALLOC_CMPL:
71507160
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7292,7 +7302,7 @@ static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
72927302
u32 map_idx = bnapi->index;
72937303
int rc;
72947304

7295-
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7305+
rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
72967306
if (rc)
72977307
return rc;
72987308

@@ -7312,7 +7322,7 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
73127322
int rc;
73137323

73147324
map_idx = grp_idx + bp->rx_nr_rings;
7315-
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7325+
rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
73167326
if (rc)
73177327
return rc;
73187328

@@ -7336,7 +7346,7 @@ static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
73367346

73377347
ring = &cpr->cp_ring_struct;
73387348
ring->handle = BNXT_SET_NQ_HDL(cpr);
7339-
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7349+
rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
73407350
if (rc)
73417351
return rc;
73427352
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
@@ -7351,7 +7361,7 @@ static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
73517361
const u32 type = HWRM_RING_ALLOC_TX;
73527362
int rc;
73537363

7354-
rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
7364+
rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
73557365
if (rc)
73567366
return rc;
73577367
bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
@@ -7377,7 +7387,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
73777387

73787388
vector = bp->irq_tbl[map_idx].vector;
73797389
disable_irq_nosync(vector);
7380-
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7390+
rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
73817391
if (rc) {
73827392
enable_irq(vector);
73837393
goto err_out;

drivers/net/ethernet/broadcom/bnxt/bnxt.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1105,6 +1105,7 @@ struct bnxt_rx_ring_info {
11051105

11061106
unsigned long *rx_agg_bmap;
11071107
u16 rx_agg_bmap_size;
1108+
u32 rx_page_size;
11081109
bool need_head_pool;
11091110

11101111
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
183183
u16 cons, u8 *data_ptr, unsigned int len,
184184
struct xdp_buff *xdp)
185185
{
186-
u32 buflen = BNXT_RX_PAGE_SIZE;
186+
u32 buflen = rxr->rx_page_size;
187187
struct bnxt_sw_rx_bd *rx_buf;
188188
struct pci_dev *pdev;
189189
dma_addr_t mapping;
@@ -460,15 +460,15 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
460460

461461
struct sk_buff *
462462
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
463-
struct page_pool *pool, struct xdp_buff *xdp)
463+
struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp)
464464
{
465465
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
466466

467467
if (!skb)
468468
return NULL;
469469

470470
xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
471-
BNXT_RX_PAGE_SIZE * num_frags,
471+
rxr->rx_page_size * num_frags,
472472
xdp_buff_get_skb_flags(xdp));
473473
return skb;
474474
}

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,6 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
3232
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
3333
struct xdp_buff *xdp);
3434
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
35-
u8 num_frags, struct page_pool *pool,
35+
u8 num_frags, struct bnxt_rx_ring_info *rxr,
3636
struct xdp_buff *xdp);
3737
#endif

0 commit comments

Comments (0)