@@ -1260,6 +1260,9 @@ static void __io_req_aux_free(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
+       if (req->flags & REQ_F_NEED_CLEANUP)
+               io_cleanup_req(req);
+
        kfree(req->io);
        if (req->file) {
                if (req->flags & REQ_F_FIXED_FILE)
@@ -1275,9 +1278,6 @@ static void __io_free_req(struct io_kiocb *req)
 {
        __io_req_aux_free(req);
 
-       if (req->flags & REQ_F_NEED_CLEANUP)
-               io_cleanup_req(req);
-
        if (req->flags & REQ_F_INFLIGHT) {
                struct io_ring_ctx *ctx = req->ctx;
                unsigned long flags;
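
These two hunks hoist the REQ_F_NEED_CLEANUP handling out of __io_free_req() and into __io_req_aux_free(), so any free path that goes through the aux helper (the batched free paths included) runs the per-opcode cleanup, and it now runs before kfree(req->io) rather than after, since the cleanup may still reference that async context. A minimal user-space sketch of the resulting ordering; struct req, req_cleanup() and req_aux_free() are illustrative stand-ins, not kernel names:

#include <stdio.h>
#include <stdlib.h>

#define REQ_F_NEED_CLEANUP	1u

struct req {
	unsigned flags;
	void *async_ctx;	/* stands in for req->io */
};

/* per-opcode teardown; it may dereference state kept in async_ctx,
 * which is why it must run before async_ctx is freed */
static void req_cleanup(struct req *r)
{
	printf("cleanup, async_ctx still valid: %p\n", r->async_ctx);
	r->flags &= ~REQ_F_NEED_CLEANUP;
}

/* shared helper: every free path (single or batched) funnels here,
 * so hoisting the cleanup into it covers all of them */
static void req_aux_free(struct req *r)
{
	if (r->flags & REQ_F_NEED_CLEANUP)
		req_cleanup(r);
	free(r->async_ctx);
}

int main(void)
{
	struct req r = { REQ_F_NEED_CLEANUP, malloc(16) };
	req_aux_free(&r);
	return 0;
}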
@@ -1672,11 +1672,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
        mutex_unlock(&ctx->uring_lock);
 }
 
-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-                            long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                          long min)
 {
        int iters = 0, ret = 0;
 
+       /*
+        * We disallow the app entering submit/complete with polling, but we
+        * still need to lock the ring to prevent racing with polled issue
+        * that got punted to a workqueue.
+        */
+       mutex_lock(&ctx->uring_lock);
        do {
                int tmin = 0;
 
@@ -1712,21 +1718,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                ret = 0;
        } while (min && !*nr_events && !need_resched());
 
-       return ret;
-}
-
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-               long min)
-{
-       int ret;
-
-       /*
-        * We disallow the app entering submit/complete with polling, but we
-        * still need to lock the ring to prevent racing with polled issue
-        * that got punted to a workqueue.
-        */
-       mutex_lock(&ctx->uring_lock);
-       ret = __io_iopoll_check(ctx, nr_events, min);
        mutex_unlock(&ctx->uring_lock);
        return ret;
 }
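
Here the single-caller wrapper is folded away: __io_iopoll_check() is renamed to io_iopoll_check() and takes uring_lock around the whole reap loop itself, while the other path that needs reaping (io_sq_thread(), patched further down) already holds the lock and switches to calling io_iopoll_getevents() directly. A sketch of that locked-entry/unlocked-worker split, using a pthread mutex in place of uring_lock; poll_check() and poll_getevents() are stand-in names:

#include <pthread.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* unlocked worker: the caller must hold ring_lock; the SQPOLL-thread
 * analogue, which already holds it, calls this directly */
static int poll_getevents(unsigned *nr_events, long min)
{
	/* ... reap at least `min` completions into *nr_events ... */
	(void)min;
	*nr_events = 0;
	return 0;
}

/* public entry point: brackets the whole reap loop with the lock so a
 * polled request punted to a worker can't race the reaping */
static int poll_check(unsigned *nr_events, long min)
{
	int ret;

	pthread_mutex_lock(&ring_lock);
	ret = poll_getevents(nr_events, min);
	pthread_mutex_unlock(&ring_lock);
	return ret;
}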
@@ -2517,6 +2508,9 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
        struct io_kiocb *nxt = NULL;
        int ret;
 
+       if (io_req_cancelled(req))
+               return;
+
        ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
                                req->sync.len);
        if (ret < 0)
@@ -2904,6 +2898,7 @@ static void io_close_finish(struct io_wq_work **workptr)
        struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
        struct io_kiocb *nxt = NULL;
 
+       /* not cancellable, don't do io_req_cancelled() */
        __io_close_finish(req, &nxt);
        if (nxt)
                io_wq_assign_next(workptr, nxt);
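
io_fallocate_finish() gains the bail-out that other punted handlers use: if the request was cancelled while queued to io-wq, io_req_cancelled() completes it with -ECANCELED and the handler returns without calling vfs_fallocate(). io_close_finish() deliberately opts out, per the added comment: by the time it runs the close has already been committed, so the finish step must execute regardless. A self-contained sketch of the cancel-check pattern; work_item, work_cancelled() and fallocate_work() are illustrative names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct work_item {
	atomic_bool cancelled;
};

/* analogue of io_req_cancelled(): if the request was cancelled while
 * queued, complete it as -ECANCELED and tell the handler to bail */
static bool work_cancelled(struct work_item *w)
{
	if (atomic_load(&w->cancelled)) {
		fprintf(stderr, "completing request with -ECANCELED\n");
		return true;
	}
	return false;
}

static void fallocate_work(struct work_item *w)
{
	if (work_cancelled(w))
		return;		/* skip the actual fallocate */
	/* ... do the punted vfs_fallocate() work here ... */
}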
@@ -3071,7 +3066,7 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                if (req->io)
                        return -EAGAIN;
                if (io_alloc_async_ctx(req)) {
-                       if (kmsg && kmsg->iov != kmsg->fast_iov)
+                       if (kmsg->iov != kmsg->fast_iov)
                                kfree(kmsg->iov);
                        return -ENOMEM;
                }
@@ -3225,7 +3220,7 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                if (req->io)
                        return -EAGAIN;
                if (io_alloc_async_ctx(req)) {
-                       if (kmsg && kmsg->iov != kmsg->fast_iov)
+                       if (kmsg->iov != kmsg->fast_iov)
                                kfree(kmsg->iov);
                        return -ENOMEM;
                }
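
The twin sendmsg/recvmsg hunks delete a dead NULL test: by the time this -ENOMEM path runs, kmsg has already been pointed at either req->io->msg or the on-stack message holder, so it cannot be NULL, and the extra guard only obscured the real question of which iovec needs freeing. A simplified sketch of that free decision; struct async_msg and free_retry_iov() are illustrative:

#include <stdlib.h>

/* simplified shape of the send/recv msg context: iov either points at
 * the inline fast_iov storage or at a heap allocation */
struct async_msg {
	void *iov;
	void *fast_iov;
};

static void free_retry_iov(struct async_msg *kmsg)
{
	/* kmsg is always non-NULL on this error path; only a
	 * heap-allocated iovec needs freeing, the inline one lives
	 * inside the request */
	if (kmsg->iov != kmsg->fast_iov)
		free(kmsg->iov);
}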
@@ -5114,7 +5109,7 @@ static int io_sq_thread(void *data)
                         */
                        mutex_lock(&ctx->uring_lock);
                        if (!list_empty(&ctx->poll_list))
-                               __io_iopoll_check(ctx, &nr_events, 0);
+                               io_iopoll_getevents(ctx, &nr_events, 0);
                        else
                                inflight = 0;
                        mutex_unlock(&ctx->uring_lock);
@@ -5138,6 +5133,18 @@ static int io_sq_thread(void *data)
                 * to enter the kernel to reap and flush events.
                 */
                if (!to_submit || ret == -EBUSY) {
+                       /*
+                        * Drop cur_mm before scheduling, we can't hold it for
+                        * long periods (or over schedule()). Do this before
+                        * adding ourselves to the waitqueue, as the unuse/drop
+                        * may sleep.
+                        */
+                       if (cur_mm) {
+                               unuse_mm(cur_mm);
+                               mmput(cur_mm);
+                               cur_mm = NULL;
+                       }
+
                        /*
                         * We're polling. If we're within the defined idle
                         * period, then let us spin without work before going
@@ -5152,18 +5159,6 @@ static int io_sq_thread(void *data)
                                continue;
                        }
 
-                       /*
-                        * Drop cur_mm before scheduling, we can't hold it for
-                        * long periods (or over schedule()). Do this before
-                        * adding ourselves to the waitqueue, as the unuse/drop
-                        * may sleep.
-                        */
-                       if (cur_mm) {
-                               unuse_mm(cur_mm);
-                               mmput(cur_mm);
-                               cur_mm = NULL;
-                       }
-
                        prepare_to_wait(&ctx->sqo_wait, &wait,
                                        TASK_INTERRUPTIBLE);
 
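
The io_sq_thread() hunks pair with the iopoll change above: the thread already holds uring_lock, so it now calls the unlocked io_iopoll_getevents() directly, and the unuse_mm()/mmput() pair moves ahead of the idle-spin continue, so the thread drops the submitter's mm before busy-waiting, not only before sleeping. A compact sketch of that reordering; resource_put(), idle_path() and the other names are illustrative stand-ins for the mm-handling calls:

#include <stdbool.h>

struct mm_ref { int users; };

/* stand-in for unuse_mm() + mmput(): may sleep, so it must happen
 * before the thread parks itself on a waitqueue */
static void resource_put(struct mm_ref **held)
{
	if (*held) {
		(*held)->users--;
		*held = NULL;
	}
}

static void idle_path(struct mm_ref **cur_mm, bool in_idle_period)
{
	/* drop the mm FIRST: both the spin path and the sleep path
	 * below now run without pinning the submitter's mm */
	resource_put(cur_mm);

	if (in_idle_period) {
		/* keep busy-waiting; before the patch this early
		 * return (the kernel's `continue`) skipped the drop */
		return;
	}

	/* ... prepare_to_wait() + schedule() analogue here ... */
}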