Commit b8d6eb6

isilence authored and axboe committed
io_uring/zcrx: always dma map in advance
zcrx was originally establishing DMA mappings at a late stage, when the area was being bound to a page pool. Dma-buf couldn't work this way, so it is initialised during area creation instead. Having the two paths map at different spots is messy, so just move everything to area creation time.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://patch.msgid.link/334092a2cbdd4aabd7c025050aa99f05ace89bb5.1774261953.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 4104156 commit b8d6eb6

1 file changed: io_uring/zcrx.c (15 additions, 29 deletions)
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -194,6 +194,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
 {
 	struct page **pages;
 	int nr_pages, ret;
+	bool mapped = false;
 
 	if (area_reg->dmabuf_fd)
 		return -EINVAL;
@@ -210,6 +211,12 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
 	if (ret)
 		goto out_err;
 
+	ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
+			      DMA_FROM_DEVICE, IO_DMA_ATTR);
+	if (ret < 0)
+		goto out_err;
+	mapped = true;
+
 	mem->account_pages = io_count_account_pages(pages, nr_pages);
 	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
 	if (ret < 0) {
@@ -223,6 +230,9 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
 	mem->size = area_reg->len;
 	return ret;
 out_err:
+	if (mapped)
+		dma_unmap_sgtable(ifq->dev, &mem->page_sg_table,
+				  DMA_FROM_DEVICE, IO_DMA_ATTR);
 	sg_free_table(&mem->page_sg_table);
 	unpin_user_pages(pages, nr_pages);
 	kvfree(pages);
@@ -288,30 +298,6 @@ static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
 	}
 }
 
-static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
-{
-	int ret;
-
-	guard(mutex)(&ifq->pp_lock);
-	if (area->is_mapped)
-		return 0;
-
-	if (!area->mem.is_dmabuf) {
-		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
-				      DMA_FROM_DEVICE, IO_DMA_ATTR);
-		if (ret < 0)
-			return ret;
-	}
-
-	ret = io_populate_area_dma(ifq, area);
-	if (ret && !area->mem.is_dmabuf)
-		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
-				  DMA_FROM_DEVICE, IO_DMA_ATTR);
-	if (ret == 0)
-		area->is_mapped = true;
-	return ret;
-}
-
 static void io_zcrx_sync_for_device(struct page_pool *pool,
 				    struct net_iov *niov)
 {
@@ -464,6 +450,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 	ret = io_import_area(ifq, &area->mem, area_reg);
 	if (ret)
 		goto err;
+	area->is_mapped = true;
 
 	if (buf_size_shift > io_area_max_shift(&area->mem)) {
 		ret = -ERANGE;
@@ -499,6 +486,10 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 		niov->type = NET_IOV_IOURING;
 	}
 
+	ret = io_populate_area_dma(ifq, area);
+	if (ret)
+		goto err;
+
 	area->free_count = nr_iovs;
 	/* we're only supporting one area per ifq for now */
 	area->area_id = 0;
@@ -1082,7 +1073,6 @@ static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
 static int io_pp_zc_init(struct page_pool *pp)
 {
 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
-	int ret;
 
 	if (WARN_ON_ONCE(!ifq))
 		return -EINVAL;
@@ -1095,10 +1085,6 @@ static int io_pp_zc_init(struct page_pool *pp)
 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
 		return -EOPNOTSUPP;
 
-	ret = io_zcrx_map_area(ifq, ifq->area);
-	if (ret)
-		return ret;
-
 	refcount_inc(&ifq->refs);
 	return 0;
 }
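
Condensed, the new ordering looks like the sketch below. This is a minimal userspace mock, not kernel code: struct area, map_sgtable(), populate_dma(), and the return values are stand-ins invented here for illustration. Only the call ordering mirrors the patch: io_import_umem() now maps the sg table up front, io_zcrx_create_area() populates the per-niov DMA addresses, and io_pp_zc_init() no longer maps anything when the area is bound to a page pool.

/*
 * Minimal userspace sketch of the control flow after this commit.
 * All types and helpers are stand-ins for the real symbols in
 * io_uring/zcrx.c; only the ordering is the point.
 */
#include <stdbool.h>
#include <stdio.h>

struct area {
	bool mapped;    /* stands in for area->is_mapped */
	bool populated; /* stands in for the per-niov DMA addresses */
};

/* stand-in for dma_map_sgtable() succeeding */
static int map_sgtable(struct area *a)
{
	a->mapped = true;
	return 0;
}

/* stand-in for io_populate_area_dma() */
static int populate_dma(struct area *a)
{
	a->populated = a->mapped;
	return a->populated ? 0 : -1;
}

/* io_import_umem(): pin pages, then map the sg table right away */
static int import_umem(struct area *a)
{
	return map_sgtable(a);
}

/* io_zcrx_create_area(): import (which maps), then fill in DMA addresses */
static int create_area(struct area *a)
{
	int ret = import_umem(a);

	if (ret)
		return ret;
	return populate_dma(a);
}

/* io_pp_zc_init(): after this commit it only validates and takes a ref;
 * the io_zcrx_map_area() call that used to live here is gone. */
static int pp_init(const struct area *a)
{
	return a->populated ? 0 : -1;
}

int main(void)
{
	struct area a = { 0 };

	if (create_area(&a) || pp_init(&a))
		return 1;
	printf("area mapped and populated before page pool init\n");
	return 0;
}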
