@@ -79,12 +79,15 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
 	if (io_has_work(ctx))
 		goto out_wake;
 	/* got events since we started waiting, min timeout is done */
-	if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
-		goto out_wake;
-	/* if we have any events and min timeout expired, we're done */
-	if (io_cqring_events(ctx))
-		goto out_wake;
+	scoped_guard(rcu) {
+		struct io_rings *rings = io_get_rings(ctx);
 
+		if (iowq->cq_min_tail != READ_ONCE(rings->cq.tail))
+			goto out_wake;
+		/* if we have any events and min timeout expired, we're done */
+		if (io_cqring_events(ctx))
+			goto out_wake;
+	}
 	/*
 	 * If using deferred task_work running and application is waiting on
 	 * more than one request, ensure we reset it now where we are switching
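
For readers unfamiliar with the cleanup.h guards used in this hunk: scoped_guard(rcu) holds rcu_read_lock() for the braced block and releases it on every exit path, including the goto out_wake jumps, via the compiler's cleanup attribute. A minimal sketch of the RCU pattern the hunk relies on, assuming io_get_rings() is a dereference helper introduced elsewhere in this series (the name comes from the hunk itself; the body below is an assumption, not the helper from the series):

    #include <linux/cleanup.h>   /* scoped_guard(), guard() */
    #include <linux/rcupdate.h>  /* rcu_dereference(), rcu guard class */

    /* Hypothetical sketch: with the rings pointer made RCU-protected,
     * readers may only dereference it inside an RCU read-side
     * critical section. */
    static inline struct io_rings *io_get_rings(struct io_ring_ctx *ctx)
    {
            return rcu_dereference(ctx->rings);
    }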
@@ -186,9 +189,9 @@ int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 		   struct ext_arg *ext_arg)
 {
 	struct io_wait_queue iowq;
-	struct io_rings *rings = ctx->rings;
+	struct io_rings *rings;
 	ktime_t start_time;
-	int ret;
+	int ret, nr_wait;
 
 	min_events = min_t(int, min_events, ctx->cq_entries);
 
@@ -201,15 +204,23 @@ int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 
 	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
 		io_cqring_do_overflow_flush(ctx);
-	if (__io_cqring_events_user(ctx) >= min_events)
+
+	rcu_read_lock();
+	rings = io_get_rings(ctx);
+	if (__io_cqring_events_user(ctx) >= min_events) {
+		rcu_read_unlock();
 		return 0;
+	}
 
 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
 	iowq.wq.private = current;
 	INIT_LIST_HEAD(&iowq.wq.entry);
 	iowq.ctx = ctx;
-	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
-	iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
+	iowq.cq_tail = READ_ONCE(rings->cq.head) + min_events;
+	iowq.cq_min_tail = READ_ONCE(rings->cq.tail);
+	nr_wait = (int) iowq.cq_tail - READ_ONCE(rings->cq.tail);
+	rcu_read_unlock();
+	rings = NULL;
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	iowq.hit_timeout = 0;
 	iowq.min_timeout = ext_arg->min_time;
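
The shape of this hunk is a snapshot-then-poison pattern: every value needed from the rings (cq.head, cq.tail, and the derived nr_wait) is read inside a single RCU read-side section, the lock is dropped, and the local rings pointer is NULLed so any stale use later in the function faults immediately instead of reading memory a concurrent ring resize may have freed. A condensed sketch of the same idea, with hypothetical names:

    /* Hypothetical sketch of the snapshot pattern above; io_get_rings()
     * and the struct name are assumptions. */
    struct cq_snapshot {
            unsigned int head, tail;
    };

    static void cq_snapshot_rings(struct io_ring_ctx *ctx, struct cq_snapshot *s)
    {
            struct io_rings *rings;

            rcu_read_lock();
            rings = io_get_rings(ctx);      /* valid only until the unlock */
            s->head = READ_ONCE(rings->cq.head);
            s->tail = READ_ONCE(rings->cq.tail);
            rcu_read_unlock();
            rings = NULL;                   /* defensive poison, as in the patch,
                                             * where the local outlives the
                                             * critical section */
    }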
@@ -240,14 +251,6 @@ int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
 		unsigned long check_cq;
-		int nr_wait;
-
-		/* if min timeout has been hit, don't reset wait count */
-		if (!iowq.hit_timeout)
-			nr_wait = (int) iowq.cq_tail -
-					READ_ONCE(ctx->rings->cq.tail);
-		else
-			nr_wait = 1;
 
 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 			atomic_set(&ctx->cq_wait_nr, nr_wait);
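
Note this hunk only deletes: the per-iteration nr_wait computation is not lost, it moves to the bottom of the loop in the next hunk, and the first iteration now uses the value seeded under RCU before the loop, so atomic_set() never sees an uninitialized count. A hypothetical helper expressing the recomputation that the next hunk open-codes (the name is illustrative):

    /* Hypothetical helper; the patch open-codes this at the loop tail. */
    static int compute_nr_wait(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
    {
            int nr = 1;

            /* if min timeout has been hit, don't reset wait count */
            if (!iowq->hit_timeout)
                    scoped_guard(rcu)
                            nr = (int)iowq->cq_tail -
                                 READ_ONCE(io_get_rings(ctx)->cq.tail);
            return nr;
    }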
@@ -298,11 +301,20 @@ int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 			break;
 		}
 		cond_resched();
+
+		/* if min timeout has been hit, don't reset wait count */
+		if (!iowq.hit_timeout)
+			scoped_guard(rcu)
+				nr_wait = (int) iowq.cq_tail -
+					READ_ONCE(io_get_rings(ctx)->cq.tail);
+		else
+			nr_wait = 1;
 	} while (1);
 
 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
 		finish_wait(&ctx->cq_wait, &iowq.wq);
 	restore_saved_sigmask_unless(ret == -EINTR);
 
-	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
+	guard(rcu)();
+	return READ_ONCE(io_get_rings(ctx)->cq.head) == READ_ONCE(io_get_rings(ctx)->cq.tail) ? ret : 0;
 }
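
guard(rcu)() at the end is the scope-based counterpart used when the critical section must cover a return statement: it takes rcu_read_lock() at the declaration and releases it when the enclosing scope is left, so both io_get_rings() calls in the return expression sit under one read-side section. An equivalent open-coded form, for comparison (the function name is made up):

    /* Open-coded equivalent of the guarded return above. */
    static int io_cqring_wait_ret(struct io_ring_ctx *ctx, int ret)
    {
            struct io_rings *rings;
            bool drained;

            rcu_read_lock();
            rings = io_get_rings(ctx);
            drained = READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail);
            rcu_read_unlock();

            return drained ? ret : 0;
    }

Hoisting io_get_rings() into a single local, as this version does, also avoids dereferencing it twice in the return expression; under the guard, both forms are equally safe.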