Skip to content

Commit 00ac2a4

Browse files
metze-samba authored and smfrench committed
smb: smbdirect: remove unused smbdirect_connection_mr_io_recovery_work()
This would actually never be used as we only move to SMBDIRECT_MR_ERROR when we directly call smbdirect_socket_schedule_cleanup(). Doing an ib_dereg_mr/ib_alloc_mr dance on a working connection is not needed, and it's also pointless on a broken connection as we don't reuse any ib_pd.

Cc: Steve French <smfrench@gmail.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Long Li <longli@microsoft.com>
Cc: Namjae Jeon <linkinjeon@kernel.org>
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
1 parent a40e6f0 commit 00ac2a4

1 file changed

Lines changed: 10 additions & 85 deletions

File tree

fs/smb/common/smbdirect/smbdirect_mr.c

Lines changed: 10 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,6 @@
66

77
#include "smbdirect_internal.h"
88

9-
static void smbdirect_connection_mr_io_recovery_work(struct work_struct *work);
10-
119
/*
1210
* Allocate MRs used for RDMA read/write
1311
* The number of MRs will not exceed hardware capability in responder_resources
@@ -66,8 +64,6 @@ int smbdirect_connection_create_mr_list(struct smbdirect_socket *sc)
6664
atomic_inc(&sc->mr_io.ready.count);
6765
}
6866

69-
INIT_WORK(&sc->mr_io.recovery_work, smbdirect_connection_mr_io_recovery_work);
70-
7167
return 0;
7268

7369
kcalloc_sgl_failed:
@@ -127,8 +123,6 @@ void smbdirect_connection_destroy_mr_list(struct smbdirect_socket *sc)
127123
LIST_HEAD(all_list);
128124
unsigned long flags;
129125

130-
disable_work_sync(&sc->mr_io.recovery_work);
131-
132126
spin_lock_irqsave(&sc->mr_io.all.lock, flags);
133127
list_splice_tail_init(&sc->mr_io.all.list, &all_list);
134128
spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
@@ -164,11 +158,8 @@ void smbdirect_connection_destroy_mr_list(struct smbdirect_socket *sc)
164158

165159
/*
166160
* Get a MR from mr_list. This function waits until there is at least one MR
167-
* available in the list. It may access the list while the
168-
* smbdirect_connection_mr_io_recovery_work is recovering the MR list. This
169-
* doesn't need a lock as they never modify the same places. However, there may
170-
* be several CPUs issuing I/O trying to get MR at the same time, mr_list_lock
171-
* is used to protect this situation.
161+
* available in the list. There may be several CPUs issuing I/O trying to get MR
162+
* at the same time, mr_list_lock is used to protect this situation.
172163
*/
173164
static struct smbdirect_mr_io *
174165
smbdirect_connection_get_mr_io(struct smbdirect_socket *sc)
@@ -246,65 +237,6 @@ static void smbdirect_connection_mr_io_local_inv_done(struct ib_cq *cq, struct i
246237
complete(&mr->invalidate_done);
247238
}
248239

249-
/*
250-
* The work queue function that recovers MRs
251-
* We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
252-
* again. Both calls are slow, so finish them in a workqueue. This will not
253-
* block I/O path.
254-
* There is one workqueue that recovers MRs, there is no need to lock as the
255-
* I/O requests calling smbd_register_mr will never update the links in the
256-
* mr_list.
257-
*/
258-
static void smbdirect_connection_mr_io_recovery_work(struct work_struct *work)
259-
{
260-
struct smbdirect_socket *sc =
261-
container_of(work, struct smbdirect_socket, mr_io.recovery_work);
262-
struct smbdirect_socket_parameters *sp = &sc->parameters;
263-
struct smbdirect_mr_io *mr;
264-
int ret;
265-
266-
list_for_each_entry(mr, &sc->mr_io.all.list, list) {
267-
if (mr->state != SMBDIRECT_MR_ERROR)
268-
/* This MR is being used, don't recover it */
269-
continue;
270-
271-
/* recover this MR entry */
272-
ret = ib_dereg_mr(mr->mr);
273-
if (ret) {
274-
smbdirect_log_rdma_mr(sc, SMBDIRECT_LOG_ERR,
275-
"ib_dereg_mr failed ret=%u (%1pe)\n",
276-
ret, SMBDIRECT_DEBUG_ERR_PTR(ret));
277-
smbdirect_socket_schedule_cleanup(sc, ret);
278-
continue;
279-
}
280-
281-
mr->mr = ib_alloc_mr(sc->ib.pd,
282-
sc->mr_io.type,
283-
sp->max_frmr_depth);
284-
if (IS_ERR(mr->mr)) {
285-
ret = PTR_ERR(mr->mr);
286-
smbdirect_log_rdma_mr(sc, SMBDIRECT_LOG_ERR,
287-
"ib_alloc_mr failed ret=%d (%1pe) type=0x%x depth=%u\n",
288-
ret, SMBDIRECT_DEBUG_ERR_PTR(ret),
289-
sc->mr_io.type, sp->max_frmr_depth);
290-
smbdirect_socket_schedule_cleanup(sc, ret);
291-
continue;
292-
}
293-
294-
mr->state = SMBDIRECT_MR_READY;
295-
296-
/* smbdirect_mr->state is updated by this function
297-
* and is read and updated by I/O issuing CPUs trying
298-
* to get a MR, the call to atomic_inc_return
299-
* implicates a memory barrier and guarantees this
300-
* value is updated before waking up any calls to
301-
* get_mr() from the I/O issuing CPUs
302-
*/
303-
if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
304-
wake_up(&sc->mr_io.ready.wait_queue);
305-
}
306-
}
307-
308240
/*
309241
* Transcribe the pages from an iterator into an MR scatterlist.
310242
*/
@@ -421,15 +353,13 @@ smbdirect_connection_register_mr_io(struct smbdirect_socket *sc,
421353
"ib_post_send failed ret=%d (%1pe) reg_wr->key=0x%x\n",
422354
ret, SMBDIRECT_DEBUG_ERR_PTR(ret), reg_wr->key);
423355

424-
/* If all failed, attempt to recover this MR by setting it SMBDIRECT_MR_ERROR*/
425356
map_mr_error:
426357
ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
427358

428359
dma_map_error:
429360
mr->sgt.nents = 0;
430361
mr->state = SMBDIRECT_MR_ERROR;
431-
if (atomic_dec_and_test(&sc->mr_io.used.count))
432-
wake_up(&sc->mr_io.cleanup.wait_queue);
362+
atomic_dec(&sc->mr_io.used.count);
433363

434364
smbdirect_socket_schedule_cleanup(sc, ret);
435365

@@ -529,20 +459,15 @@ void smbdirect_connection_deregister_mr_io(struct smbdirect_mr_io *mr)
529459
mr->sgt.nents = 0;
530460
}
531461

532-
if (mr->state == SMBDIRECT_MR_INVALIDATED) {
533-
mr->state = SMBDIRECT_MR_READY;
534-
if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
535-
wake_up(&sc->mr_io.ready.wait_queue);
536-
} else
537-
/*
538-
* Schedule the work to do MR recovery for future I/Os MR
539-
* recovery is slow and don't want it to block current I/O
540-
*/
541-
queue_work(sc->workqueue, &sc->mr_io.recovery_work);
462+
WARN_ONCE(mr->state != SMBDIRECT_MR_INVALIDATED,
463+
"mr->state[%u] != SMBDIRECT_MR_INVALIDATED[%u]\n",
464+
mr->state, SMBDIRECT_MR_INVALIDATED);
465+
mr->state = SMBDIRECT_MR_READY;
466+
if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
467+
wake_up(&sc->mr_io.ready.wait_queue);
542468

543469
done:
544-
if (atomic_dec_and_test(&sc->mr_io.used.count))
545-
wake_up(&sc->mr_io.cleanup.wait_queue);
470+
atomic_dec(&sc->mr_io.used.count);
546471

547472
put_kref:
548473
/*

0 commit comments

Comments (0)