From ff526d86051f3689fe41e235ab962389d43999a9 Mon Sep 17 00:00:00 2001
From: Long Li
Date: Thu, 20 Sep 2018 21:18:39 +0000
Subject: CIFS: SMBD: Do not call ib_dereg_mr on invalidated memory registration

It is not necessary to deregister a memory registration after it has
been successfully invalidated.

Signed-off-by: Long Li
Signed-off-by: Steve French
---
 fs/cifs/smbdirect.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

(limited to 'fs/cifs')

diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5fdb9a509a97..5e282368cc4a 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2295,8 +2295,12 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 	int rc;
 
 	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
-		if (smbdirect_mr->state == MR_INVALIDATED ||
-			smbdirect_mr->state == MR_ERROR) {
+		if (smbdirect_mr->state == MR_INVALIDATED)
+			ib_dma_unmap_sg(
+				info->id->device, smbdirect_mr->sgl,
+				smbdirect_mr->sgl_count,
+				smbdirect_mr->dir);
+		else if (smbdirect_mr->state == MR_ERROR) {
 
 			/* recover this MR entry */
 			rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2320,25 +2324,21 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 				smbd_disconnect_rdma_connection(info);
 				continue;
 			}
+		} else
+			/* This MR is being used, don't recover it */
+			continue;
 
-			if (smbdirect_mr->state == MR_INVALIDATED)
-				ib_dma_unmap_sg(
-					info->id->device, smbdirect_mr->sgl,
-					smbdirect_mr->sgl_count,
-					smbdirect_mr->dir);
-
-			smbdirect_mr->state = MR_READY;
+		smbdirect_mr->state = MR_READY;
 
-			/* smbdirect_mr->state is updated by this function
-			 * and is read and updated by I/O issuing CPUs trying
-			 * to get a MR, the call to atomic_inc_return
-			 * implicates a memory barrier and guarantees this
-			 * value is updated before waking up any calls to
-			 * get_mr() from the I/O issuing CPUs
-			 */
-			if (atomic_inc_return(&info->mr_ready_count) == 1)
-				wake_up_interruptible(&info->wait_mr);
-		}
+		/* smbdirect_mr->state is updated by this function
+		 * and is read and updated by I/O issuing CPUs trying
+		 * to get a MR, the call to atomic_inc_return
+		 * implicates a memory barrier and guarantees this
+		 * value is updated before waking up any calls to
+		 * get_mr() from the I/O issuing CPUs
+		 */
+		if (atomic_inc_return(&info->mr_ready_count) == 1)
+			wake_up_interruptible(&info->wait_mr);
 	}
 }
--
cgit v1.2.3
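
Editor's note: after this patch, the control flow of smbd_mr_recovery_work() is: an MR in
MR_INVALIDATED state only has its scatter/gather list DMA-unmapped before being marked ready
again, an MR in MR_ERROR state goes through the full deregister-and-reallocate recovery path,
and an MR in any other state is skipped. The stand-alone sketch below is a minimal user-space
model of that flow, not the driver code; the enum values mirror the driver's mr_state names,
but model_dma_unmap(), model_reregister() and model_recovery_pass() are illustrative stand-ins
for ib_dma_unmap_sg() and the ib_dereg_mr()/ib_alloc_mr() pair, and the wake-up of waiting
I/O issuers is reduced to a counter.

#include <stdio.h>

/* Mirrors the MR states used by the SMB Direct transport. */
enum mr_state { MR_READY, MR_REGISTERED, MR_INVALIDATED, MR_ERROR };

static void model_dma_unmap(int idx)
{
	printf("MR %d: DMA unmap only, registration already invalidated\n", idx);
}

static int model_reregister(int idx)
{
	printf("MR %d: deregister and reallocate the registration\n", idx);
	return 0;	/* 0 = success; nonzero would mean "skip this MR" */
}

static void model_recovery_pass(enum mr_state *mrs, int n, int *ready_count)
{
	for (int i = 0; i < n; i++) {
		if (mrs[i] == MR_INVALIDATED) {
			/* Invalidated MR: no deregistration needed, only the
			 * SG list still has to be unmapped. */
			model_dma_unmap(i);
		} else if (mrs[i] == MR_ERROR) {
			/* Failed MR: rebuild the registration from scratch. */
			if (model_reregister(i))
				continue;
		} else {
			/* MR is in use or already ready: leave it alone. */
			continue;
		}

		/* Common tail for both recovered cases. */
		mrs[i] = MR_READY;
		(*ready_count)++;
	}
}

int main(void)
{
	enum mr_state mrs[] = { MR_INVALIDATED, MR_ERROR, MR_REGISTERED };
	int ready = 0;

	model_recovery_pass(mrs, 3, &ready);
	printf("MRs returned to MR_READY: %d\n", ready);
	return 0;
}

The point of the restructuring, as the subject line says, is that the MR_INVALIDATED branch no
longer passes through the ib_dereg_mr()/ib_alloc_mr() pair at all.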