|
6 | 6 |
|
7 | 7 | #include "smbdirect_internal.h" |
8 | 8 |
|
9 | | -static void smbdirect_connection_mr_io_recovery_work(struct work_struct *work); |
10 | | - |
11 | 9 | /* |
12 | 10 | * Allocate MRs used for RDMA read/write |
13 | 11 | * The number of MRs will not exceed hardware capability in responder_resources |
@@ -66,8 +64,6 @@ int smbdirect_connection_create_mr_list(struct smbdirect_socket *sc) |
66 | 64 | atomic_inc(&sc->mr_io.ready.count); |
67 | 65 | } |
68 | 66 |
|
69 | | - INIT_WORK(&sc->mr_io.recovery_work, smbdirect_connection_mr_io_recovery_work); |
70 | | - |
71 | 67 | return 0; |
72 | 68 |
|
73 | 69 | kcalloc_sgl_failed: |
@@ -127,8 +123,6 @@ void smbdirect_connection_destroy_mr_list(struct smbdirect_socket *sc) |
127 | 123 | LIST_HEAD(all_list); |
128 | 124 | unsigned long flags; |
129 | 125 |
|
130 | | - disable_work_sync(&sc->mr_io.recovery_work); |
131 | | - |
132 | 126 | spin_lock_irqsave(&sc->mr_io.all.lock, flags); |
133 | 127 | list_splice_tail_init(&sc->mr_io.all.list, &all_list); |
134 | 128 | spin_unlock_irqrestore(&sc->mr_io.all.lock, flags); |
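The destroy path splices the shared MR list onto a stack-local head while holding the lock, then (in the lines following this hunk) tears the entries down without it. A minimal sketch of that drain idiom, using hypothetical `item`/`drain_all()` names rather than the driver's types:

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
};

static void drain_all(struct list_head *shared, spinlock_t *lock)
{
	LIST_HEAD(private);
	struct item *it, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_tail_init(shared, &private); /* shared is now empty */
	spin_unlock_irqrestore(lock, flags);

	/* No other CPU can reach &private, so no lock is needed here. */
	list_for_each_entry_safe(it, tmp, &private, list) {
		list_del(&it->list);
		kfree(it);
	}
}
```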
@@ -164,11 +158,8 @@ void smbdirect_connection_destroy_mr_list(struct smbdirect_socket *sc) |
164 | 158 |
|
165 | 159 | /* |
166 | 160 | * Get a MR from mr_list. This function waits until there is at least one MR |
167 | | - * available in the list. It may access the list while the |
168 | | - * smbdirect_connection_mr_io_recovery_work is recovering the MR list. This |
169 | | - * doesn't need a lock as they never modify the same places. However, there may |
170 | | - * be several CPUs issuing I/O trying to get MR at the same time, mr_list_lock |
171 | | - * is used to protect this situation. |
| 161 | + * available in the list. Several CPUs issuing I/O may try to get an MR at the |
| 162 | + * same time; mr_list_lock serializes them. |
172 | 163 | */ |
173 | 164 | static struct smbdirect_mr_io * |
174 | 165 | smbdirect_connection_get_mr_io(struct smbdirect_socket *sc) |
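The rewritten comment describes a classic blocking pool-getter: sleep until the ready count is non-zero, then let concurrent getters race under a lock. A minimal sketch of that shape, assuming hypothetical `mr_pool`/`pool_entry` types standing in for the real `sc->mr_io` fields:

```c
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct pool_entry {
	struct list_head list;
};

struct mr_pool {
	atomic_t ready_count;		/* number of ready entries */
	wait_queue_head_t ready_wq;	/* woken on the 0 -> 1 transition */
	spinlock_t lock;		/* serializes concurrent getters */
	struct list_head ready_list;
};

static struct pool_entry *pool_get(struct mr_pool *p)
{
	struct pool_entry *e;

	/* Sleep until at least one entry is ready. */
	wait_event(p->ready_wq, atomic_read(&p->ready_count) > 0);

	/* Several CPUs may pass the wait at once; the lock decides. */
	spin_lock(&p->lock);
	e = list_first_entry_or_null(&p->ready_list, struct pool_entry, list);
	if (e) {
		list_del(&e->list);
		atomic_dec(&p->ready_count);
	}
	spin_unlock(&p->lock);

	return e;	/* NULL means we lost the race; caller retries */
}
```

Returning NULL on a lost race and letting the caller retry keeps the critical section down to a single list operation.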
@@ -246,65 +237,6 @@ static void smbdirect_connection_mr_io_local_inv_done(struct ib_cq *cq, struct i |
246 | 237 | complete(&mr->invalidate_done); |
247 | 238 | } |
248 | 239 |
|
249 | | -/* |
250 | | - * The work queue function that recovers MRs |
251 | | - * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used |
252 | | - * again. Both calls are slow, so finish them in a workqueue. This will not |
253 | | - * block I/O path. |
254 | | - * There is one workqueue that recovers MRs, there is no need to lock as the |
255 | | - * I/O requests calling smbd_register_mr will never update the links in the |
256 | | - * mr_list. |
257 | | - */ |
258 | | -static void smbdirect_connection_mr_io_recovery_work(struct work_struct *work) |
259 | | -{ |
260 | | - struct smbdirect_socket *sc = |
261 | | - container_of(work, struct smbdirect_socket, mr_io.recovery_work); |
262 | | - struct smbdirect_socket_parameters *sp = &sc->parameters; |
263 | | - struct smbdirect_mr_io *mr; |
264 | | - int ret; |
265 | | - |
266 | | - list_for_each_entry(mr, &sc->mr_io.all.list, list) { |
267 | | - if (mr->state != SMBDIRECT_MR_ERROR) |
268 | | - /* This MR is being used, don't recover it */ |
269 | | - continue; |
270 | | - |
271 | | - /* recover this MR entry */ |
272 | | - ret = ib_dereg_mr(mr->mr); |
273 | | - if (ret) { |
274 | | - smbdirect_log_rdma_mr(sc, SMBDIRECT_LOG_ERR, |
275 | | - "ib_dereg_mr failed ret=%u (%1pe)\n", |
276 | | - ret, SMBDIRECT_DEBUG_ERR_PTR(ret)); |
277 | | - smbdirect_socket_schedule_cleanup(sc, ret); |
278 | | - continue; |
279 | | - } |
280 | | - |
281 | | - mr->mr = ib_alloc_mr(sc->ib.pd, |
282 | | - sc->mr_io.type, |
283 | | - sp->max_frmr_depth); |
284 | | - if (IS_ERR(mr->mr)) { |
285 | | - ret = PTR_ERR(mr->mr); |
286 | | - smbdirect_log_rdma_mr(sc, SMBDIRECT_LOG_ERR, |
287 | | - "ib_alloc_mr failed ret=%d (%1pe) type=0x%x depth=%u\n", |
288 | | - ret, SMBDIRECT_DEBUG_ERR_PTR(ret), |
289 | | - sc->mr_io.type, sp->max_frmr_depth); |
290 | | - smbdirect_socket_schedule_cleanup(sc, ret); |
291 | | - continue; |
292 | | - } |
293 | | - |
294 | | - mr->state = SMBDIRECT_MR_READY; |
295 | | - |
296 | | - /* smbdirect_mr->state is updated by this function |
297 | | - * and is read and updated by I/O issuing CPUs trying |
298 | | - * to get a MR, the call to atomic_inc_return |
299 | | - * implicates a memory barrier and guarantees this |
300 | | - * value is updated before waking up any calls to |
301 | | - * get_mr() from the I/O issuing CPUs |
302 | | - */ |
303 | | - if (atomic_inc_return(&sc->mr_io.ready.count) == 1) |
304 | | - wake_up(&sc->mr_io.ready.wait_queue); |
305 | | - } |
306 | | -} |
307 | | - |
308 | 240 | /* |
309 | 241 | * Transcribe the pages from an iterator into an MR scatterlist. |
310 | 242 | */ |
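The deleted function was an instance of the usual deferred-recovery pattern: the I/O path only marks the object bad, and the slow `ib_dereg_mr()`/`ib_alloc_mr()` rebuild runs later from a workqueue so it never blocks I/O. A generic sketch of the pattern, with illustrative (non-driver) names:

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct recoverable {
	struct work_struct recovery_work;
	bool broken;
};

/* Runs from a workqueue; the slow rebuild lives here, off the I/O path. */
static void recovery_fn(struct work_struct *work)
{
	struct recoverable *r =
		container_of(work, struct recoverable, recovery_work);

	if (!r->broken)
		return;
	/* ... slow teardown and re-allocation would go here ... */
	r->broken = false;
}

static void recoverable_init(struct recoverable *r)
{
	r->broken = false;
	INIT_WORK(&r->recovery_work, recovery_fn);
}

/* Called from the I/O path: mark and defer, never block. */
static void io_path_error(struct recoverable *r)
{
	r->broken = true;
	schedule_work(&r->recovery_work);	/* returns immediately */
}
```

With this machinery removed, the rest of the diff instead escalates a failed MR to `smbdirect_socket_schedule_cleanup()` and warns if a deregistered MR is in any state other than SMBDIRECT_MR_INVALIDATED.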
@@ -421,15 +353,13 @@ smbdirect_connection_register_mr_io(struct smbdirect_socket *sc, |
421 | 353 | "ib_post_send failed ret=%d (%1pe) reg_wr->key=0x%x\n", |
422 | 354 | ret, SMBDIRECT_DEBUG_ERR_PTR(ret), reg_wr->key); |
423 | 355 |
|
424 | | - /* If all failed, attempt to recover this MR by setting it SMBDIRECT_MR_ERROR*/ |
425 | 356 | map_mr_error: |
426 | 357 | ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir); |
427 | 358 |
|
428 | 359 | dma_map_error: |
429 | 360 | mr->sgt.nents = 0; |
430 | 361 | mr->state = SMBDIRECT_MR_ERROR; |
431 | | - if (atomic_dec_and_test(&sc->mr_io.used.count)) |
432 | | - wake_up(&sc->mr_io.cleanup.wait_queue); |
| 362 | + atomic_dec(&sc->mr_io.used.count); |
433 | 363 |
|
434 | 364 | smbdirect_socket_schedule_cleanup(sc, ret); |
435 | 365 |
|
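This hunk also downgrades the error path's usage drop from `atomic_dec_and_test()` + `wake_up()` to a bare `atomic_dec()`. A hedged sketch of the difference, with a hypothetical `usage` struct: the dec-and-test form lets the last user wake a waiter blocked until the count reaches zero, while the bare decrement assumes nothing is waiting on `mr_io.cleanup.wait_queue` here.

```c
#include <linux/atomic.h>
#include <linux/wait.h>

struct usage {
	atomic_t used;
	wait_queue_head_t idle_wq;
};

/* Old style: the last user notifies anyone waiting for idleness. */
static void put_and_notify(struct usage *u)
{
	if (atomic_dec_and_test(&u->used))	/* true when count hits 0 */
		wake_up(&u->idle_wq);
}

/* The teardown side that the wake_up() above pairs with. */
static void wait_until_idle(struct usage *u)
{
	wait_event(u->idle_wq, atomic_read(&u->used) == 0);
}
```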
@@ -529,20 +459,15 @@ void smbdirect_connection_deregister_mr_io(struct smbdirect_mr_io *mr) |
529 | 459 | mr->sgt.nents = 0; |
530 | 460 | } |
531 | 461 |
|
532 | | - if (mr->state == SMBDIRECT_MR_INVALIDATED) { |
533 | | - mr->state = SMBDIRECT_MR_READY; |
534 | | - if (atomic_inc_return(&sc->mr_io.ready.count) == 1) |
535 | | - wake_up(&sc->mr_io.ready.wait_queue); |
536 | | - } else |
537 | | - /* |
538 | | - * Schedule the work to do MR recovery for future I/Os MR |
539 | | - * recovery is slow and don't want it to block current I/O |
540 | | - */ |
541 | | - queue_work(sc->workqueue, &sc->mr_io.recovery_work); |
| 462 | + WARN_ONCE(mr->state != SMBDIRECT_MR_INVALIDATED, |
| 463 | + "mr->state[%u] != SMBDIRECT_MR_INVALIDATED[%u]\n", |
| 464 | + mr->state, SMBDIRECT_MR_INVALIDATED); |
| 465 | + mr->state = SMBDIRECT_MR_READY; |
| 466 | + if (atomic_inc_return(&sc->mr_io.ready.count) == 1) |
| 467 | + wake_up(&sc->mr_io.ready.wait_queue); |
542 | 468 |
|
543 | 469 | done: |
544 | | - if (atomic_dec_and_test(&sc->mr_io.used.count)) |
545 | | - wake_up(&sc->mr_io.cleanup.wait_queue); |
| 470 | + atomic_dec(&sc->mr_io.used.count); |
546 | 471 |
|
547 | 472 | put_kref: |
548 | 473 | /* |
|