@@ -74,60 +74,85 @@ void __io_uring_free(struct task_struct *tsk)
 	}
 }
 
-__cold int io_uring_alloc_task_context(struct task_struct *task,
-					struct io_ring_ctx *ctx)
+__cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *task,
+							  struct io_ring_ctx *ctx)
 {
 	struct io_uring_task *tctx;
 	int ret;
 
 	tctx = kzalloc_obj(*tctx);
 	if (unlikely(!tctx))
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
 	if (unlikely(ret)) {
 		kfree(tctx);
-		return ret;
+		return ERR_PTR(ret);
 	}
 
 	tctx->io_wq = io_init_wq_offload(ctx, task);
 	if (IS_ERR(tctx->io_wq)) {
 		ret = PTR_ERR(tctx->io_wq);
 		percpu_counter_destroy(&tctx->inflight);
 		kfree(tctx);
-		return ret;
+		return ERR_PTR(ret);
 	}
 
 	tctx->task = task;
 	xa_init(&tctx->xa);
 	init_waitqueue_head(&tctx->wait);
 	atomic_set(&tctx->in_cancel, 0);
 	atomic_set(&tctx->inflight_tracked, 0);
-	task->io_uring = tctx;
 	init_llist_head(&tctx->task_list);
 	init_task_work(&tctx->task_work, tctx_task_work);
+	return tctx;
+}
+
+static int io_tctx_install_node(struct io_ring_ctx *ctx,
+				struct io_uring_task *tctx)
+{
+	struct io_tctx_node *node;
+	int ret;
+
+	if (xa_load(&tctx->xa, (unsigned long)ctx))
+		return 0;
+
+	node = kmalloc_obj(*node);
+	if (!node)
+		return -ENOMEM;
+	node->ctx = ctx;
+	node->task = current;
+
+	ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+			      node, GFP_KERNEL));
+	if (ret) {
+		kfree(node);
+		return ret;
+	}
+
+	mutex_lock(&ctx->tctx_lock);
+	list_add(&node->ctx_node, &ctx->tctx_list);
+	mutex_unlock(&ctx->tctx_lock);
 	return 0;
 }
 
 int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 {
 	struct io_uring_task *tctx = current->io_uring;
-	struct io_tctx_node *node;
 	int ret;
 
 	if (unlikely(!tctx)) {
-		ret = io_uring_alloc_task_context(current, ctx);
-		if (unlikely(ret))
-			return ret;
+		tctx = io_uring_alloc_task_context(current, ctx);
+		if (IS_ERR(tctx))
+			return PTR_ERR(tctx);
 
-		tctx = current->io_uring;
 		if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
 			unsigned int limits[2] = { ctx->iowq_limits[0],
 						   ctx->iowq_limits[1], };
 
 			ret = io_wq_max_workers(tctx->io_wq, limits);
 			if (ret)
-				return ret;
+				goto err_free;
 		}
 	}
 
@@ -138,25 +163,19 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 	 */
 	if (tctx->io_wq)
 		io_wq_set_exit_on_idle(tctx->io_wq, false);
-	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
-		node = kmalloc_obj(*node);
-		if (!node)
-			return -ENOMEM;
-		node->ctx = ctx;
-		node->task = current;
-
-		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
-				      node, GFP_KERNEL));
-		if (ret) {
-			kfree(node);
-			return ret;
-		}
 
-		mutex_lock(&ctx->tctx_lock);
-		list_add(&node->ctx_node, &ctx->tctx_list);
-		mutex_unlock(&ctx->tctx_lock);
+	ret = io_tctx_install_node(ctx, tctx);
+	if (!ret) {
+		current->io_uring = tctx;
+		return 0;
 	}
-	return 0;
+	if (!current->io_uring) {
+err_free:
+		io_wq_put_and_exit(tctx->io_wq);
+		percpu_counter_destroy(&tctx->inflight);
+		kfree(tctx);
+	}
+	return ret;
 }
 
 int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
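
The heart of this change is the switch from "return an int and stash the result in task->io_uring" to the kernel's ERR_PTR convention: io_uring_alloc_task_context() now returns either a valid struct io_uring_task pointer or an errno encoded into the pointer, and the caller publishes current->io_uring only after io_tctx_install_node() succeeds, so a failed install can unwind the fresh allocation under err_free without leaking it. As a rough userspace sketch of that return convention (the ERR_PTR/IS_ERR/PTR_ERR helpers below are simplified stand-ins for the ones in <linux/err.h>, and struct uring_task / alloc_task_context() are hypothetical stand-ins, not the io_uring types):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
 * small negative errno values are packed into the top of the address range. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)    { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

/* Hypothetical stand-in for struct io_uring_task. */
struct uring_task {
	int inflight;
};

/* Allocator in the style of the patched io_uring_alloc_task_context():
 * return the object on success, or an ERR_PTR-encoded errno on failure. */
static struct uring_task *alloc_task_context(void)
{
	struct uring_task *tctx = calloc(1, sizeof(*tctx));

	if (!tctx)
		return ERR_PTR(-ENOMEM);
	return tctx;
}

int main(void)
{
	struct uring_task *tctx = alloc_task_context();

	/* Caller pattern used by __io_uring_add_tctx_node() after the patch:
	 * the pointer itself carries the error, no second lookup needed. */
	if (IS_ERR(tctx))
		return (int)-PTR_ERR(tctx);

	printf("allocated task context at %p\n", (void *)tctx);
	free(tctx);
	return 0;
}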