|
5 | 5 |
|
6 | 6 | #include "io_uring.h" |
7 | 7 | #include "register.h" |
| 8 | +#include "loop.h" |
8 | 9 | #include "memmap.h" |
9 | 10 | #include "bpf-ops.h" |
10 | | -#include "loop.h" |
11 | 11 |
|
| 12 | +static DEFINE_MUTEX(io_bpf_ctrl_mutex); |
12 | 13 | static const struct btf_type *loop_params_type; |
13 | 14 |
|
14 | 15 | __bpf_kfunc_start_defs(); |
@@ -143,16 +144,103 @@ static int bpf_io_init_member(const struct btf_type *t, |
143 | 144 | const struct btf_member *member, |
144 | 145 | void *kdata, const void *udata) |
145 | 146 | { |
| 147 | + u32 moff = __btf_member_bit_offset(t, member) / 8; |
| 148 | + const struct io_uring_bpf_ops *uops = udata; |
| 149 | + struct io_uring_bpf_ops *ops = kdata; |
| 150 | + |
| 151 | + switch (moff) { |
| 152 | + case offsetof(struct io_uring_bpf_ops, ring_fd): |
| 153 | + ops->ring_fd = uops->ring_fd; |
| 154 | + return 1; |
| 155 | + } |
| 156 | + return 0; |
| 157 | +} |
| 158 | + |
/*
 * Attach @ops to @ctx. Caller must hold both io_bpf_ctrl_mutex and
 * ctx->uring_lock (see bpf_io_reg()).
 *
 * Returns 0 on success, -EOPNOTSUPP for ring setups the bpf loop does not
 * support, -EBUSY if some struct_ops is already installed on this ring, or
 * -EINVAL if the mandatory loop_step callback is missing.
 */
static int io_install_bpf(struct io_ring_ctx *ctx, struct io_uring_bpf_ops *ops)
{
	/* SQPOLL / IOPOLL rings are not supported by the bpf loop */
	if (ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_IOPOLL))
		return -EOPNOTSUPP;
	/* only DEFER_TASKRUN rings can drive the bpf loop */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EOPNOTSUPP;

	/* at most one struct_ops instance per ring */
	if (ctx->bpf_ops)
		return -EBUSY;
	/* loop_step is mandatory; a missing one indicates a core bug */
	if (WARN_ON_ONCE(!ops->loop_step))
		return -EINVAL;

	/* link both ways so unreg can find the ring from the ops */
	ops->priv = ctx;
	ctx->bpf_ops = ops;
	ctx->loop_step = ops->loop_step;
	return 0;
}
148 | 176 |
|
/*
 * struct_ops ->reg callback: install @kdata on the io_uring instance
 * identified by its ring_fd member.
 */
static int bpf_io_reg(void *kdata, struct bpf_link *link)
{
	struct io_uring_bpf_ops *ops = kdata;
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret = -EBUSY;

	/* grabs a reference on the ring file and validates the fd */
	file = io_uring_register_get_file(ops->ring_fd, false);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	/* lock order: io_bpf_ctrl_mutex first, then ctx->uring_lock */
	scoped_guard(mutex, &io_bpf_ctrl_mutex) {
		guard(mutex)(&ctx->uring_lock);
		ret = io_install_bpf(ctx, ops);
	}

	fput(file);
	return ret;
}
| 197 | + |
| 198 | +static void io_eject_bpf(struct io_ring_ctx *ctx) |
| 199 | +{ |
| 200 | + struct io_uring_bpf_ops *ops = ctx->bpf_ops; |
| 201 | + |
| 202 | + if (WARN_ON_ONCE(!ops)) |
| 203 | + return; |
| 204 | + if (WARN_ON_ONCE(ops->priv != ctx)) |
| 205 | + return; |
| 206 | + |
| 207 | + ops->priv = NULL; |
| 208 | + ctx->bpf_ops = NULL; |
| 209 | + ctx->loop_step = NULL; |
152 | 210 | } |
153 | 211 |
|
/*
 * struct_ops ->unreg callback: detach @kdata from whichever ring it is
 * currently installed on, if any.
 */
static void bpf_io_unreg(void *kdata, struct bpf_link *link)
{
	struct io_uring_bpf_ops *ops = kdata;
	struct io_ring_ctx *ctx;

	guard(mutex)(&io_bpf_ctrl_mutex);
	/* ->priv is stable under io_bpf_ctrl_mutex; NULL if already ejected */
	ctx = ops->priv;
	if (ctx) {
		guard(mutex)(&ctx->uring_lock);
		if (WARN_ON_ONCE(ctx->bpf_ops != ops))
			return;

		io_eject_bpf(ctx);
	}
}
| 227 | + |
/*
 * Drop any struct_ops still installed on @ctx, e.g. on ring teardown.
 */
void io_unregister_bpf_ops(struct io_ring_ctx *ctx)
{
	/*
	 * ->bpf_ops is write protected by io_bpf_ctrl_mutex and uring_lock,
	 * and read protected by either. Try to avoid taking the global lock
	 * for rings that never had any bpf installed.
	 */
	scoped_guard(mutex, &ctx->uring_lock) {
		if (!ctx->bpf_ops)
			return;
	}

	/* re-check under both locks: bpf_io_unreg() may have raced us */
	guard(mutex)(&io_bpf_ctrl_mutex);
	guard(mutex)(&ctx->uring_lock);
	if (ctx->bpf_ops)
		io_eject_bpf(ctx);
}
157 | 245 |
|
158 | 246 | static struct bpf_struct_ops bpf_ring_ops = { |
|
0 commit comments