Skip to content

Commit 7880174

Browse files
committed
io_uring/tctx: clean up __io_uring_add_tctx_node() error handling
Refactor __io_uring_add_tctx_node() so that on error it never leaves current->io_uring pointing at a half-initialized tctx: the assignment of current->io_uring is moved to the end of the function, after all failure points. The node installation is split out into a new helper, io_tctx_install_node(), to further clean this up.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 2c453a4 commit 7880174

1 file changed

Lines changed: 40 additions & 20 deletions

File tree

io_uring/tctx.c

Lines changed: 40 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -108,25 +108,51 @@ __cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *tas
108108
return tctx;
109109
}
110110

111+
static int io_tctx_install_node(struct io_ring_ctx *ctx,
112+
struct io_uring_task *tctx)
113+
{
114+
struct io_tctx_node *node;
115+
int ret;
116+
117+
if (xa_load(&tctx->xa, (unsigned long)ctx))
118+
return 0;
119+
120+
node = kmalloc_obj(*node);
121+
if (!node)
122+
return -ENOMEM;
123+
node->ctx = ctx;
124+
node->task = current;
125+
126+
ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
127+
node, GFP_KERNEL));
128+
if (ret) {
129+
kfree(node);
130+
return ret;
131+
}
132+
133+
mutex_lock(&ctx->tctx_lock);
134+
list_add(&node->ctx_node, &ctx->tctx_list);
135+
mutex_unlock(&ctx->tctx_lock);
136+
return 0;
137+
}
138+
111139
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
112140
{
113141
struct io_uring_task *tctx = current->io_uring;
114-
struct io_tctx_node *node;
115142
int ret;
116143

117144
if (unlikely(!tctx)) {
118145
tctx = io_uring_alloc_task_context(current, ctx);
119146
if (IS_ERR(tctx))
120147
return PTR_ERR(tctx);
121148

122-
current->io_uring = tctx;
123149
if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
124150
unsigned int limits[2] = { ctx->iowq_limits[0],
125151
ctx->iowq_limits[1], };
126152

127153
ret = io_wq_max_workers(tctx->io_wq, limits);
128154
if (ret)
129-
return ret;
155+
goto err_free;
130156
}
131157
}
132158

@@ -137,25 +163,19 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
137163
*/
138164
if (tctx->io_wq)
139165
io_wq_set_exit_on_idle(tctx->io_wq, false);
140-
if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
141-
node = kmalloc_obj(*node);
142-
if (!node)
143-
return -ENOMEM;
144-
node->ctx = ctx;
145-
node->task = current;
146-
147-
ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
148-
node, GFP_KERNEL));
149-
if (ret) {
150-
kfree(node);
151-
return ret;
152-
}
153166

154-
mutex_lock(&ctx->tctx_lock);
155-
list_add(&node->ctx_node, &ctx->tctx_list);
156-
mutex_unlock(&ctx->tctx_lock);
167+
ret = io_tctx_install_node(ctx, tctx);
168+
if (!ret) {
169+
current->io_uring = tctx;
170+
return 0;
157171
}
158-
return 0;
172+
if (!current->io_uring) {
173+
err_free:
174+
io_wq_put_and_exit(tctx->io_wq);
175+
percpu_counter_destroy(&tctx->inflight);
176+
kfree(tctx);
177+
}
178+
return ret;
159179
}
160180

161181
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)

0 commit comments

Comments
 (0)