author     NeilBrown <neilb@suse.de>	2022-03-07 10:41:45 +1100
committer  Trond Myklebust <trond.myklebust@hammerspace.com>	2022-03-13 12:59:36 -0400
commit     693486d5f8951780a9bb31f7fe935171a80010e4 (patch)
tree       2a5300915884d15ad3bde2dd8e3c26de6ed02cac
parent     c265de257f558a05c1859ee9e3fed04883b9ec0e (diff)
download   linux-693486d5f8951780a9bb31f7fe935171a80010e4.tar.bz2
SUNRPC: change locking for xs_swap_enable/disable
It is not in general safe to wait for XPRT_LOCKED to clear.
A wakeup is only sent when
- connection completes
- sock close completes
so during normal operations, this can wait indefinitely.
The event we need to protect against is ->inet being set to NULL, and
that happens under the recv_mutex lock.
So drop the handling of XPRT_LOCKED and use recv_mutex instead.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
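
For illustration only, here is a minimal user-space C sketch of the post-patch locking pattern: a plain mutex standing in for recv_mutex serializes swap enable/disable against the teardown path that clears ->inet, so nothing needs to wait on XPRT_LOCKED. The struct fields and function names only loosely mirror the kernel's sock_xprt / xs_* names, and printf() calls stand in for sk_set_memalloc()/sk_clear_memalloc(); this is a standalone model, not the SUNRPC implementation.

/* Build: cc -pthread swap_lock_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct sock_xprt {
	pthread_mutex_t recv_mutex;	/* stands in for xs->recv_mutex */
	void *inet;			/* socket pointer, NULL after reset */
	atomic_int swapper;		/* swap-enable refcount */
};

static int dummy_socket;
static struct sock_xprt xs = {
	.recv_mutex = PTHREAD_MUTEX_INITIALIZER,
	.inet = &dummy_socket,
	.swapper = 0,
};

/* Mirrors the new xs_enable_swap(): first enabler marks the socket, under the mutex. */
static int xs_enable_swap(struct sock_xprt *x)
{
	pthread_mutex_lock(&x->recv_mutex);
	if (atomic_fetch_add(&x->swapper, 1) == 0 && x->inet)
		printf("sk_set_memalloc(%p)\n", x->inet);
	pthread_mutex_unlock(&x->recv_mutex);
	return 0;
}

/* Mirrors the new xs_disable_swap(): last disabler clears the mark, under the mutex. */
static void xs_disable_swap(struct sock_xprt *x)
{
	pthread_mutex_lock(&x->recv_mutex);
	if (atomic_fetch_sub(&x->swapper, 1) == 1 && x->inet)
		printf("sk_clear_memalloc(%p)\n", x->inet);
	pthread_mutex_unlock(&x->recv_mutex);
}

/* The event being guarded against: teardown clears ->inet under the same mutex. */
static void xs_reset_transport(struct sock_xprt *x)
{
	pthread_mutex_lock(&x->recv_mutex);
	x->inet = NULL;
	pthread_mutex_unlock(&x->recv_mutex);
}

int main(void)
{
	xs_enable_swap(&xs);	/* prints sk_set_memalloc(...) */
	xs_reset_transport(&xs);	/* serialized with enable/disable */
	xs_disable_swap(&xs);	/* ->inet already NULL, nothing to clear */
	return 0;
}

Because enable/disable and the reset path all take the same mutex, there is no window in which ->inet can be cleared while the memalloc flag is being set or cleared, and no code path has to sleep waiting for a wakeup that may never arrive.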
-rw-r--r--	net/sunrpc/xprtsock.c	26
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 61d3293f1d68..7e39f87cde2d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1936,9 +1936,9 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 
 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
 /*
- * Note that this should be called with XPRT_LOCKED held (or when we otherwise
- * know that we have exclusive access to the socket), to guard against
- * races with xs_reset_transport.
+ * Note that this should be called with XPRT_LOCKED held, or recv_mutex
+ * held, or when we otherwise know that we have exclusive access to the
+ * socket, to guard against races with xs_reset_transport.
  */
 static void xs_set_memalloc(struct rpc_xprt *xprt)
 {
@@ -1967,13 +1967,11 @@ xs_enable_swap(struct rpc_xprt *xprt)
 {
 	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
 
-	if (atomic_inc_return(&xprt->swapper) != 1)
-		return 0;
-	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
-		return -ERESTARTSYS;
-	if (xs->inet)
+	mutex_lock(&xs->recv_mutex);
+	if (atomic_inc_return(&xprt->swapper) == 1 &&
+	    xs->inet)
 		sk_set_memalloc(xs->inet);
-	xprt_release_xprt(xprt, NULL);
+	mutex_unlock(&xs->recv_mutex);
 	return 0;
 }
 
@@ -1989,13 +1987,11 @@ xs_disable_swap(struct rpc_xprt *xprt)
 {
 	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
 
-	if (!atomic_dec_and_test(&xprt->swapper))
-		return;
-	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
-		return;
-	if (xs->inet)
+	mutex_lock(&xs->recv_mutex);
+	if (atomic_dec_and_test(&xprt->swapper) &&
+	    xs->inet)
 		sk_clear_memalloc(xs->inet);
-	xprt_release_xprt(xprt, NULL);
+	mutex_unlock(&xs->recv_mutex);
 }
 #else
 static void xs_set_memalloc(struct rpc_xprt *xprt)