|
8 | 8 | #include <linux/llist.h> |
9 | 9 | #include <uapi/linux/io_uring.h> |
10 | 10 |
|
| 11 | +struct iou_loop_params; |
| 12 | +struct io_uring_bpf_ops; |
| 13 | + |
11 | 14 | enum { |
12 | 15 | /* |
13 | 16 | * A hint to not wake right away but delay until there are enough of |
@@ -41,6 +44,8 @@ enum io_uring_cmd_flags { |
41 | 44 | IO_URING_F_COMPAT = (1 << 12), |
42 | 45 | }; |
43 | 46 |
|
| 47 | +struct iou_loop_params; |
| 48 | + |
44 | 49 | struct io_wq_work_node { |
45 | 50 | struct io_wq_work_node *next; |
46 | 51 | }; |
@@ -268,24 +273,30 @@ struct io_alloc_cache { |
268 | 273 | unsigned int init_clear; |
269 | 274 | }; |
270 | 275 |
|
| 276 | +enum { |
| 277 | + IO_RING_F_DRAIN_NEXT = BIT(0), |
| 278 | + IO_RING_F_OP_RESTRICTED = BIT(1), |
| 279 | + IO_RING_F_REG_RESTRICTED = BIT(2), |
| 280 | + IO_RING_F_OFF_TIMEOUT_USED = BIT(3), |
| 281 | + IO_RING_F_DRAIN_ACTIVE = BIT(4), |
| 282 | + IO_RING_F_HAS_EVFD = BIT(5), |
| 283 | + /* all CQEs should be posted only by the submitter task */ |
| 284 | + IO_RING_F_TASK_COMPLETE = BIT(6), |
| 285 | + IO_RING_F_LOCKLESS_CQ = BIT(7), |
| 286 | + IO_RING_F_SYSCALL_IOPOLL = BIT(8), |
| 287 | + IO_RING_F_POLL_ACTIVATED = BIT(9), |
| 288 | + IO_RING_F_DRAIN_DISABLED = BIT(10), |
| 289 | + IO_RING_F_COMPAT = BIT(11), |
| 290 | + IO_RING_F_IOWQ_LIMITS_SET = BIT(12), |
| 291 | +}; |
| 292 | + |
271 | 293 | struct io_ring_ctx { |
272 | 294 | /* const or read-mostly hot data */ |
273 | 295 | struct { |
| 296 | + /* ring setup flags */ |
274 | 297 | unsigned int flags; |
275 | | - unsigned int drain_next: 1; |
276 | | - unsigned int op_restricted: 1; |
277 | | - unsigned int reg_restricted: 1; |
278 | | - unsigned int off_timeout_used: 1; |
279 | | - unsigned int drain_active: 1; |
280 | | - unsigned int has_evfd: 1; |
281 | | - /* all CQEs should be posted only by the submitter task */ |
282 | | - unsigned int task_complete: 1; |
283 | | - unsigned int lockless_cq: 1; |
284 | | - unsigned int syscall_iopoll: 1; |
285 | | - unsigned int poll_activated: 1; |
286 | | - unsigned int drain_disabled: 1; |
287 | | - unsigned int compat: 1; |
288 | | - unsigned int iowq_limits_set : 1; |
| 298 | + /* internal state IO_RING_F_* flags, mostly read-only */ |
| 299 | + unsigned int int_flags; |
289 | 300 |
|
290 | 301 | struct task_struct *submitter_task; |
291 | 302 | struct io_rings *rings; |
@@ -355,6 +366,9 @@ struct io_ring_ctx { |
355 | 366 | struct io_alloc_cache rw_cache; |
356 | 367 | struct io_alloc_cache cmd_cache; |
357 | 368 |
|
| 369 | + int (*loop_step)(struct io_ring_ctx *ctx, |
| 370 | + struct iou_loop_params *); |
| 371 | + |
358 | 372 | /* |
359 | 373 | * Any cancelable uring_cmd is added to this list in |
360 | 374 | * ->uring_cmd() by io_uring_cmd_insert_cancelable() |
@@ -477,6 +491,8 @@ struct io_ring_ctx { |
477 | 491 | DECLARE_HASHTABLE(napi_ht, 4); |
478 | 492 | #endif |
479 | 493 |
|
| 494 | + struct io_uring_bpf_ops *bpf_ops; |
| 495 | + |
480 | 496 | /* |
481 | 497 | * Protection for resize vs mmap races - both the mmap and resize |
482 | 498 | * side will need to grab this lock, to prevent either side from |
@@ -545,6 +561,7 @@ enum { |
545 | 561 | REQ_F_HAS_METADATA_BIT, |
546 | 562 | REQ_F_IMPORT_BUFFER_BIT, |
547 | 563 | REQ_F_SQE_COPIED_BIT, |
| 564 | + REQ_F_IOPOLL_BIT, |
548 | 565 |
|
549 | 566 | /* not a real bit, just to check we're not overflowing the space */ |
550 | 567 | __REQ_F_LAST_BIT, |
@@ -638,6 +655,8 @@ enum { |
638 | 655 | REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT), |
639 | 656 | /* ->sqe_copy() has been called, if necessary */ |
640 | 657 | REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT), |
| 658 | + /* request must be iopolled to completion (set in ->issue()) */ |
| 659 | + REQ_F_IOPOLL = IO_REQ_FLAG(REQ_F_IOPOLL_BIT), |
641 | 660 | }; |
642 | 661 |
|
643 | 662 | struct io_tw_req { |
|
0 commit comments