
Commit 0842186

ps-ushankar authored and axboe committed
ublk: reset per-IO canceled flag on each fetch
If a ublk server starts recovering devices but dies before issuing fetch commands for all IOs, cancellation of the fetch commands that were successfully issued may never complete. This is because the per-IO canceled flag can remain set even after the fetch for that IO has been submitted: the per-IO canceled flags for all IOs in a queue are reset together only once all IOs for that queue have been fetched. So if a nonempty proper subset of the IOs for a queue have been fetched when the ublk server dies, the IOs in that subset will never successfully be canceled, as their canceled flags remain set, and this prevents ublk_cancel_cmd from actually calling io_uring_cmd_done on the commands, despite the fact that they are outstanding.

Fix this by resetting the per-IO canceled flag immediately when each IO is fetched, instead of waiting for all IOs in the queue to be fetched (which may never happen).

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Fixes: 728cbac ("ublk: move device reset into ublk_ch_release()")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: zhang, the-essence-of-life <zhangweize9@gmail.com>
Link: https://patch.msgid.link/20260405-cancel-v2-1-02d711e643c2@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
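For context on why a stale UBLK_IO_FLAG_CANCELED blocks completion, the sketch below abbreviates the ublk_cancel_cmd() check the commit message refers to. It is illustrative only: the helper name and exact arguments are approximations, not the upstream code.

/*
 * Illustrative sketch (not upstream code) of the cancellation check in
 * ublk_cancel_cmd(): the flag gates the io_uring_cmd_done() call.
 */
static void ublk_cancel_cmd_sketch(struct ublk_queue *ubq, struct ublk_io *io,
                                   unsigned int issue_flags)
{
        bool done;

        spin_lock(&ubq->cancel_lock);
        /* A flag that is already set means "this command was canceled". */
        done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
        if (!done)
                io->flags |= UBLK_IO_FLAG_CANCELED;
        spin_unlock(&ubq->cancel_lock);

        /*
         * If the flag was left set from an earlier cancellation, this call
         * is skipped and the newly fetched command is never completed.
         * Clearing the flag per IO at fetch time keeps this path live.
         */
        if (!done)
                io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
}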
1 parent 8b155f2 commit 0842186

1 file changed: drivers/block/ublk_drv.c (13 additions & 8 deletions)
@@ -2916,22 +2916,26 @@ static void ublk_stop_dev(struct ublk_device *ub)
 	ublk_cancel_dev(ub);
 }
 
+static void ublk_reset_io_flags(struct ublk_queue *ubq, struct ublk_io *io)
+{
+	/* UBLK_IO_FLAG_CANCELED can be cleared now */
+	spin_lock(&ubq->cancel_lock);
+	io->flags &= ~UBLK_IO_FLAG_CANCELED;
+	spin_unlock(&ubq->cancel_lock);
+}
+
 /* reset per-queue io flags */
 static void ublk_queue_reset_io_flags(struct ublk_queue *ubq)
 {
-	int j;
-
-	/* UBLK_IO_FLAG_CANCELED can be cleared now */
 	spin_lock(&ubq->cancel_lock);
-	for (j = 0; j < ubq->q_depth; j++)
-		ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
 	ubq->canceling = false;
 	spin_unlock(&ubq->cancel_lock);
 	ubq->fail_io = false;
 }
 
 /* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, u16 q_id)
+static void ublk_mark_io_ready(struct ublk_device *ub, u16 q_id,
+			       struct ublk_io *io)
 	__must_hold(&ub->mutex)
 {
 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
@@ -2940,6 +2944,7 @@ static void ublk_mark_io_ready(struct ublk_device *ub, u16 q_id)
 	ub->unprivileged_daemons = true;
 
 	ubq->nr_io_ready++;
+	ublk_reset_io_flags(ubq, io);
 
 	/* Check if this specific queue is now fully ready */
 	if (ublk_queue_ready(ubq)) {
@@ -3202,7 +3207,7 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
 	if (!ret)
 		ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
 	if (!ret)
-		ublk_mark_io_ready(ub, q_id);
+		ublk_mark_io_ready(ub, q_id, io);
 	mutex_unlock(&ub->mutex);
 	return ret;
 }
@@ -3610,7 +3615,7 @@ static int ublk_batch_prep_io(struct ublk_queue *ubq,
 	ublk_io_unlock(io);
 
 	if (!ret)
-		ublk_mark_io_ready(data->ub, ubq->q_id);
+		ublk_mark_io_ready(data->ub, ubq->q_id, io);
 
 	return ret;
 }
