From 24ca9a847791fd53d9b217330b15f3c285827a18 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 22 Nov 2011 14:44:28 +0200
Subject: SUNRPC: Ensure we return EAGAIN in xs_nospace if congestion is cleared

By returning '0' instead of 'EAGAIN' when the tests in xs_nospace() fail
to find evidence of socket congestion, we are making the RPC engine believe
that the message was incorrectly sent and so it disconnects the socket
instead of just retrying.

The bug appears to have been introduced by commit
5e3771ce2d6a69e10fcc870cdf226d121d868491 (SUNRPC: Ensure that xs_nospace
return values are propagated).

Reported-by: Andrew Cooper
Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org [>= 2.6.30]
Tested-by: Andrew Cooper
---
 net/sunrpc/xprtsock.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'net/sunrpc')

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d78d95955ab..55472c48825e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-	int ret = 0;
+	int ret = -EAGAIN;
 
 	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
 			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
 	/* Don't race with disconnect */
 	if (xprt_connected(xprt)) {
 		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
-			ret = -EAGAIN;
 			/*
 			 * Notify TCP that we're limited by the application
 			 * window size
--
cgit v1.2.3
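For illustration, here is a minimal userspace C sketch (all names hypothetical; this is not the kernel code above) of the return-value contract the fix restores: the transmit path treats -EAGAIN as "wait for socket space, then retry", while any other status, including a stray 0, is handled as a failed send.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a "no space" helper: called after a send could not complete. */
static int demo_nospace(bool still_connected)
{
	int ret = -EAGAIN;		/* default: ask the caller to retry */

	if (!still_connected)
		ret = -ENOTCONN;	/* a real error overrides the default */
	return ret;
}

/* Stand-in for the transmit path that consumes the helper's status. */
static void demo_handle_status(int status)
{
	switch (status) {
	case -EAGAIN:
		printf("short write: wait for socket space, then retry\n");
		break;
	case -ENOTCONN:
		printf("not connected: reconnect before retrying\n");
		break;
	default:
		/* A stray 0 lands here and forces an unnecessary disconnect. */
		printf("unexpected status %d: disconnect\n", status);
		break;
	}
}

int main(void)
{
	demo_handle_status(demo_nospace(true));	/* retry path */
	demo_handle_status(0);			/* the pre-patch behaviour */
	return 0;
}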
From 7fdcf13b292e8b2e38e42de24be2503e37b2cf97 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Thu, 1 Dec 2011 14:00:15 -0500
Subject: SUNRPC: Fix the execution time statistics in the face of RPC restarts

If the rpc_task gets restarted, then we want to ensure that we don't
double-count the execution time statistics, timeout data, etc.

Signed-off-by: Trond Myklebust
---
 net/sunrpc/sched.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

(limited to 'net/sunrpc')

diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa545811..00a1a2acd587 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+	/* Initialize retry counters */
+	task->tk_garb_retry = 2;
+	task->tk_cred_retry = 2;
+	task->tk_rebind_retry = 2;
+
+	/* starting timestamp */
+	task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+	task->tk_timeouts = 0;
+	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+	rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
 			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
 			xprt_release(task);
+			rpc_reset_task_statistics(task);
 		}
 	}
 }
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	task->tk_calldata = task_setup_data->callback_data;
 	INIT_LIST_HEAD(&task->tk_task);
 
-	/* Initialize retry counters */
-	task->tk_garb_retry = 2;
-	task->tk_cred_retry = 2;
-	task->tk_rebind_retry = 2;
-
 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 	task->tk_owner = current->tgid;
 
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	if (task->tk_ops->rpc_call_prepare != NULL)
 		task->tk_action = rpc_prepare_task;
 
-	/* starting timestamp */
-	task->tk_start = ktime_get();
+	rpc_init_task_statistics(task);
 
 	dprintk("RPC: new task initialized, procpid %u\n",
 				task_pid_nr(current));
--
cgit v1.2.3

From c25573b5134294c0be82bfaecc6d08136835b271 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Thu, 1 Dec 2011 14:16:17 -0500
Subject: SUNRPC: Ensure we always bump the backlog queue in xprt_free_slot

Whenever we free a slot, we know that the resulting xprt->num_reqs will
be less than xprt->max_reqs, so we know that we can release at least one
backlogged rpc_task.

Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org [>=3.1]
---
 net/sunrpc/xprt.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

(limited to 'net/sunrpc')

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e45a5fc..c64c0ef519b5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-	if (xprt_dynamic_free_slot(xprt, req))
-		return;
-
-	memset(req, 0, sizeof(*req));	/* mark unused */
-
 	spin_lock(&xprt->reserve_lock);
-	list_add(&req->rq_list, &xprt->free);
+	if (!xprt_dynamic_free_slot(xprt, req)) {
+		memset(req, 0, sizeof(*req));	/* mark unused */
+		list_add(&req->rq_list, &xprt->free);
+	}
 	rpc_wake_up_next(&xprt->backlog);
 	spin_unlock(&xprt->reserve_lock);
 }
--
cgit v1.2.3
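For illustration, a minimal userspace C sketch (all names hypothetical; this is not the kernel code above) of the invariant the last patch enforces: freeing a slot always drops the number of allocated requests below the cap, so the free path wakes one backlogged waiter whether the slot is returned to the free list or released entirely.

#include <stdbool.h>
#include <stdio.h>

struct demo_pool {
	int num_reqs;		/* slots currently allocated */
	int max_reqs;		/* hard cap on allocated slots */
	int free_slots;		/* slots parked on the free list */
	int backlog;		/* tasks queued waiting for a slot */
};

/* Returns true when the slot was dynamically allocated and is simply dropped. */
static bool demo_dynamic_free(struct demo_pool *p)
{
	p->num_reqs--;
	return p->num_reqs > 2;		/* arbitrary threshold for the sketch */
}

static void demo_free_slot(struct demo_pool *p)
{
	if (!demo_dynamic_free(p))
		p->free_slots++;	/* keep the slot around for reuse */

	/* Either way num_reqs is now below max_reqs, so wake one waiter. */
	if (p->backlog > 0) {
		p->backlog--;
		printf("woke one backlogged task (%d still waiting)\n",
		       p->backlog);
	}
}

int main(void)
{
	struct demo_pool p = {
		.num_reqs = 4, .max_reqs = 4, .free_slots = 0, .backlog = 2,
	};

	demo_free_slot(&p);	/* dynamic slot dropped; a waiter is still woken */
	demo_free_slot(&p);	/* slot goes back on the free list; waiter woken */
	return 0;
}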