Skip to content

Commit 9165dc4

Browse files
calebsander authored and axboe committed
io_uring: add REQ_F_IOPOLL
A subsequent commit will allow uring_cmds to files that don't implement ->uring_cmd_iopoll() to be issued to IORING_SETUP_IOPOLL io_urings. This means the ctx's IORING_SETUP_IOPOLL flag isn't sufficient to determine whether a given request needs to be iopolled. Introduce a request flag REQ_F_IOPOLL set in ->issue() if a request needs to be iopolled to completion. Set the flag in io_rw_init_file() and io_uring_cmd() for requests issued to IORING_SETUP_IOPOLL ctxs. Use the request flag instead of IORING_SETUP_IOPOLL in places dealing with a specific request. A future possibility would be to add an option to enable/disable iopoll in the io_uring SQE instead of determining it from IORING_SETUP_IOPOLL. Signed-off-by: Caleb Sander Mateos <csander@purestorage.com> Reviewed-by: Kanchan Joshi <joshi.k@samsung.com> Reviewed-by: Anuj Gupta <anuj20.g@samsung.com> Link: https://patch.msgid.link/20260302172914.2488599-2-csander@purestorage.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 8c55744 commit 9165dc4

4 files changed

Lines changed: 16 additions & 12 deletions

File tree

include/linux/io_uring_types.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -550,6 +550,7 @@ enum {
550550
REQ_F_HAS_METADATA_BIT,
551551
REQ_F_IMPORT_BUFFER_BIT,
552552
REQ_F_SQE_COPIED_BIT,
553+
REQ_F_IOPOLL_BIT,
553554

554555
/* not a real bit, just to check we're not overflowing the space */
555556
__REQ_F_LAST_BIT,
@@ -641,6 +642,8 @@ enum {
641642
REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
642643
/* ->sqe_copy() has been called, if necessary */
643644
REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
645+
/* request must be iopolled to completion (set in ->issue()) */
646+
REQ_F_IOPOLL = IO_REQ_FLAG(REQ_F_IOPOLL_BIT),
644647
};
645648

646649
struct io_tw_req {

io_uring/io_uring.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -356,7 +356,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
356356
static void io_prep_async_work(struct io_kiocb *req)
357357
{
358358
const struct io_issue_def *def = &io_issue_defs[req->opcode];
359-
struct io_ring_ctx *ctx = req->ctx;
360359

361360
if (!(req->flags & REQ_F_CREDS)) {
362361
req->flags |= REQ_F_CREDS;
@@ -378,7 +377,7 @@ static void io_prep_async_work(struct io_kiocb *req)
378377
if (should_hash && (req->file->f_flags & O_DIRECT) &&
379378
(req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
380379
should_hash = false;
381-
if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
380+
if (should_hash || (req->flags & REQ_F_IOPOLL))
382381
io_wq_hash_work(&req->work, file_inode(req->file));
383382
} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
384383
if (def->unbound_nonreg_file)
@@ -1419,7 +1418,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
14191418
ret = 0;
14201419

14211420
/* If the op doesn't have a file, we're not polling for it */
1422-
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1421+
if ((req->flags & REQ_F_IOPOLL) && def->iopoll_queue)
14231422
io_iopoll_req_issued(req, issue_flags);
14241423
}
14251424
return ret;
@@ -1435,7 +1434,7 @@ int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
14351434
io_tw_lock(req->ctx, tw);
14361435

14371436
WARN_ON_ONCE(!req->file);
1438-
if (WARN_ON_ONCE(req->ctx->flags & IORING_SETUP_IOPOLL))
1437+
if (WARN_ON_ONCE(req->flags & REQ_F_IOPOLL))
14391438
return -EFAULT;
14401439

14411440
ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
@@ -1533,7 +1532,7 @@ void io_wq_submit_work(struct io_wq_work *work)
15331532
* wait for request slots on the block side.
15341533
*/
15351534
if (!needs_poll) {
1536-
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1535+
if (!(req->flags & REQ_F_IOPOLL))
15371536
break;
15381537
if (io_wq_worker_stopped())
15391538
break;

io_uring/rw.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -504,7 +504,7 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
504504
if (!S_ISBLK(mode) && !S_ISREG(mode))
505505
return false;
506506
if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
507-
!(ctx->flags & IORING_SETUP_IOPOLL)))
507+
!(req->flags & REQ_F_IOPOLL)))
508508
return false;
509509
/*
510510
* If ref is dying, we might be running poll reap from the exit work.
@@ -640,7 +640,7 @@ static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
640640
}
641641
}
642642

643-
if (req->ctx->flags & IORING_SETUP_IOPOLL)
643+
if (req->flags & REQ_F_IOPOLL)
644644
io_complete_rw_iopoll(&rw->kiocb, ret);
645645
else
646646
io_complete_rw(&rw->kiocb, ret);
@@ -654,7 +654,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
654654

655655
if (ret >= 0 && req->flags & REQ_F_CUR_POS)
656656
req->file->f_pos = rw->kiocb.ki_pos;
657-
if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
657+
if (ret >= 0 && !(req->flags & REQ_F_IOPOLL)) {
658658
u32 cflags = 0;
659659

660660
__io_complete_rw_common(req, ret);
@@ -876,6 +876,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
876876
if (ctx->flags & IORING_SETUP_IOPOLL) {
877877
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
878878
return -EOPNOTSUPP;
879+
req->flags |= REQ_F_IOPOLL;
879880
kiocb->private = NULL;
880881
kiocb->ki_flags |= IOCB_HIPRI;
881882
req->iopoll_completed = 0;
@@ -963,7 +964,7 @@ static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
963964
if (io_file_can_poll(req))
964965
return -EAGAIN;
965966
/* IOPOLL retry should happen for io-wq threads */
966-
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
967+
if (!force_nonblock && !(req->flags & REQ_F_IOPOLL))
967968
goto done;
968969
/* no retry on NONBLOCK nor RWF_NOWAIT */
969970
if (req->flags & REQ_F_NOWAIT)
@@ -1188,7 +1189,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
11881189
goto done;
11891190
if (!force_nonblock || ret2 != -EAGAIN) {
11901191
/* IOPOLL retry should happen for io-wq threads */
1191-
if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
1192+
if (ret2 == -EAGAIN && (req->flags & REQ_F_IOPOLL))
11921193
goto ret_eagain;
11931194

11941195
if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {

io_uring/uring_cmd.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
110110
* because iopoll completion data overlaps with the hash_node used
111111
* for tracking.
112112
*/
113-
if (ctx->flags & IORING_SETUP_IOPOLL)
113+
if (req->flags & REQ_F_IOPOLL)
114114
return;
115115

116116
if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
@@ -167,7 +167,7 @@ void __io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
167167
io_req_set_cqe32_extra(req, res2, 0);
168168
}
169169
io_req_uring_cleanup(req, issue_flags);
170-
if (req->ctx->flags & IORING_SETUP_IOPOLL) {
170+
if (req->flags & REQ_F_IOPOLL) {
171171
/* order with io_iopoll_req_issued() checking ->iopoll_complete */
172172
smp_store_release(&req->iopoll_completed, 1);
173173
} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
@@ -260,6 +260,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
260260
if (ctx->flags & IORING_SETUP_IOPOLL) {
261261
if (!file->f_op->uring_cmd_iopoll)
262262
return -EOPNOTSUPP;
263+
req->flags |= REQ_F_IOPOLL;
263264
issue_flags |= IO_URING_F_IOPOLL;
264265
req->iopoll_completed = 0;
265266
if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {

0 commit comments

Comments (0)