author    Ursula Braun <ubraun@linux.vnet.ibm.com>    2017-01-09 16:55:25 +0100
committer David S. Miller <davem@davemloft.net>       2017-01-09 16:07:40 -0500
commit    b38d732477e4211351b2680e805d944f66bceec9 (patch)
tree      c4d7764d21d9a5e6de2197292c7cf9b75a7aba81 /net/smc/af_smc.c
parent    952310ccf2d861966cfb8706f16d5e4eb585edb7 (diff)
smc: socket closing and linkgroup cleanup
smc_shutdown() and smc_release() handle delayed linkgroup cleanup for
linkgroups without connections.

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
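The core mechanism behind this cleanup is deferred release: the closing socket
keeps a reference, and the final sock_put() is issued from a delayed work item
(the patch's sock_put_work, scheduled with TCP_TIMEWAIT_LEN for fallback
sockets or SMC_CLOSE_SOCK_PUT_DELAY otherwise), so the connection and its link
group are freed only after the peer has had time to finish closing. Below is a
minimal sketch of that pattern in kernel C; the demo_* names and
DEMO_SOCK_PUT_DELAY are invented for illustration and are not the actual SMC
definitions.

#include <linux/workqueue.h>
#include <net/sock.h>

/* illustrative delay; the patch uses TCP_TIMEWAIT_LEN or SMC_CLOSE_SOCK_PUT_DELAY */
#define DEMO_SOCK_PUT_DELAY	(10 * HZ)

struct demo_sock {
	struct sock		sk;		/* base sock, first member as in struct smc_sock */
	struct delayed_work	sock_put_work;	/* drops the final reference later */
};

/* work function: runs DEMO_SOCK_PUT_DELAY jiffies after the release path ran */
static void demo_sock_put_work(struct work_struct *work)
{
	struct demo_sock *dsk = container_of(to_delayed_work(work),
					     struct demo_sock, sock_put_work);

	/* protocol-specific teardown could run here, then drop the reference
	 * taken in demo_release()
	 */
	sock_put(&dsk->sk);
}

/* called once when the socket is created */
static void demo_sock_init(struct demo_sock *dsk)
{
	INIT_DELAYED_WORK(&dsk->sock_put_work, demo_sock_put_work);
}

/* called from the release/close path while the sock lock is held */
static void demo_release(struct demo_sock *dsk)
{
	sock_hold(&dsk->sk);	/* keep sk alive until the delayed work has run */
	schedule_delayed_work(&dsk->sock_put_work, DEMO_SOCK_PUT_DELAY);
}

In the patch itself, demo_sock_init() corresponds to the INIT_DELAYED_WORK()
call added to smc_sock_alloc(), and demo_release() to the
schedule_delayed_work() calls added to smc_release() and
smc_close_non_accepted().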
Diffstat (limited to 'net/smc/af_smc.c')
-rw-r--r--   net/smc/af_smc.c   127
1 file changed, 105 insertions(+), 22 deletions(-)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index fc9c51c549e5..3f543d58bc5c 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -39,6 +39,7 @@
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
+#include "smc_close.h"
static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group
* creation
@@ -70,14 +71,29 @@ static int smc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
+ int rc = 0;
if (!sk)
goto out;
smc = smc_sk(sk);
- lock_sock(sk);
+ sock_hold(sk);
+ if (sk->sk_state == SMC_LISTEN)
+ /* smc_close_non_accepted() is called and acquires
+ * sock lock for child sockets again
+ */
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ else
+ lock_sock(sk);
- sk->sk_state = SMC_CLOSED;
+ if (smc->use_fallback) {
+ sk->sk_state = SMC_CLOSED;
+ sk->sk_state_change(sk);
+ } else {
+ rc = smc_close_active(smc);
+ sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ }
if (smc->clcsock) {
sock_release(smc->clcsock);
smc->clcsock = NULL;
@@ -86,11 +102,18 @@ static int smc_release(struct socket *sock)
/* detach socket */
sock_orphan(sk);
sock->sk = NULL;
+ if (smc->use_fallback) {
+ schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
+ } else if (sk->sk_state == SMC_CLOSED) {
+ smc_conn_free(&smc->conn);
+ schedule_delayed_work(&smc->sock_put_work,
+ SMC_CLOSE_SOCK_PUT_DELAY);
+ }
release_sock(sk);
sock_put(sk);
out:
- return 0;
+ return rc;
}
static void smc_destruct(struct sock *sk)
@@ -120,6 +143,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
INIT_LIST_HEAD(&smc->accept_q);
spin_lock_init(&smc->accept_q_lock);
+ INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
sk_refcnt_debug_inc(sk);
return sk;
@@ -417,7 +441,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
out_connected:
smc_copy_sock_settings_to_clc(smc);
- smc->sk.sk_state = SMC_ACTIVE;
+ if (smc->sk.sk_state == SMC_INIT)
+ smc->sk.sk_state = SMC_ACTIVE;
return rc ? rc : local_contact;
@@ -559,8 +584,8 @@ static void smc_accept_unlink(struct sock *sk)
/* remove a sock from the accept queue to bind it to a new socket created
* for a socket accept call from user space
*/
-static struct sock *smc_accept_dequeue(struct sock *parent,
- struct socket *new_sock)
+struct sock *smc_accept_dequeue(struct sock *parent,
+ struct socket *new_sock)
{
struct smc_sock *isk, *n;
struct sock *new_sk;
@@ -581,11 +606,17 @@ static struct sock *smc_accept_dequeue(struct sock *parent,
}
/* clean up for a created but never accepted sock */
-static void smc_close_non_accepted(struct sock *sk)
+void smc_close_non_accepted(struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
sock_hold(sk);
+ lock_sock(sk);
+ if (!sk->sk_lingertime)
+ /* wait for peer closing */
+ sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
+ if (!smc->use_fallback)
+ smc_close_active(smc);
if (smc->clcsock) {
struct socket *tcp;
@@ -593,7 +624,16 @@ static void smc_close_non_accepted(struct sock *sk)
smc->clcsock = NULL;
sock_release(tcp);
}
- /* more closing stuff to be added with socket closing patch */
+ sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ if (smc->use_fallback) {
+ schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
+ } else {
+ smc_conn_free(&smc->conn);
+ schedule_delayed_work(&smc->sock_put_work,
+ SMC_CLOSE_SOCK_PUT_DELAY);
+ }
+ release_sock(sk);
sock_put(sk);
}
@@ -761,11 +801,12 @@ static void smc_listen_work(struct work_struct *work)
out_connected:
sk_refcnt_debug_inc(newsmcsk);
- newsmcsk->sk_state = SMC_ACTIVE;
+ if (newsmcsk->sk_state == SMC_INIT)
+ newsmcsk->sk_state = SMC_ACTIVE;
enqueue:
if (local_contact == SMC_FIRST_CONTACT)
mutex_unlock(&smc_create_lgr_pending);
- lock_sock(&lsmc->sk);
+ lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
if (lsmc->sk.sk_state == SMC_LISTEN) {
smc_accept_enqueue(&lsmc->sk, newsmcsk);
} else { /* no longer listening */
@@ -791,6 +832,7 @@ decline_rdma:
out_err:
newsmcsk->sk_state = SMC_CLOSED;
+ smc_conn_free(&new_smc->conn);
goto enqueue; /* queue new sock with sk_err set */
}
@@ -911,7 +953,8 @@ static int smc_getname(struct socket *sock, struct sockaddr *addr,
{
struct smc_sock *smc;
- if (peer && (sock->sk->sk_state != SMC_ACTIVE))
+ if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
+ (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
return -ENOTCONN;
smc = smc_sk(sock->sk);
@@ -927,7 +970,9 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
smc = smc_sk(sk);
lock_sock(sk);
- if (sk->sk_state != SMC_ACTIVE)
+ if ((sk->sk_state != SMC_ACTIVE) &&
+ (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+ (sk->sk_state != SMC_INIT))
goto out;
if (smc->use_fallback)
rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
@@ -947,13 +992,21 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
smc = smc_sk(sk);
lock_sock(sk);
- if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED))
+ if ((sk->sk_state == SMC_INIT) ||
+ (sk->sk_state == SMC_LISTEN) ||
+ (sk->sk_state == SMC_CLOSED))
+ goto out;
+
+ if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
+ rc = 0;
goto out;
+ }
if (smc->use_fallback)
rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
else
rc = smc_rx_recvmsg(smc, msg, len, flags);
+
out:
release_sock(sk);
return rc;
@@ -1013,7 +1066,8 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
mask |= smc_accept_poll(sk);
if (sk->sk_err)
mask |= POLLERR;
- if (atomic_read(&smc->conn.sndbuf_space)) {
+ if (atomic_read(&smc->conn.sndbuf_space) ||
+ (sk->sk_shutdown & SEND_SHUTDOWN)) {
mask |= POLLOUT | POLLWRNORM;
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -1021,7 +1075,14 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= POLLIN | POLLRDNORM;
- /* for now - to be enhanced in follow-on patch */
+ if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
+ (sk->sk_state == SMC_CLOSED))
+ mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+ if (sk->sk_state == SMC_APPCLOSEWAIT1)
+ mask |= POLLIN;
+
}
return mask;
@@ -1032,31 +1093,53 @@ static int smc_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
struct smc_sock *smc;
int rc = -EINVAL;
+ int rc1 = 0;
smc = smc_sk(sk);
if ((how < SHUT_RD) || (how > SHUT_RDWR))
- goto out_err;
+ return rc;
lock_sock(sk);
rc = -ENOTCONN;
- if (sk->sk_state == SMC_CLOSED)
+ if ((sk->sk_state != SMC_LISTEN) &&
+ (sk->sk_state != SMC_ACTIVE) &&
+ (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
+ (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
+ (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+ (sk->sk_state != SMC_APPCLOSEWAIT2) &&
+ (sk->sk_state != SMC_APPFINCLOSEWAIT))
goto out;
if (smc->use_fallback) {
rc = kernel_sock_shutdown(smc->clcsock, how);
sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
if (sk->sk_shutdown == SHUTDOWN_MASK)
sk->sk_state = SMC_CLOSED;
- } else {
- rc = sock_no_shutdown(sock, how);
+ goto out;
+ }
+ switch (how) {
+ case SHUT_RDWR: /* shutdown in both directions */
+ rc = smc_close_active(smc);
+ break;
+ case SHUT_WR:
+ rc = smc_close_shutdown_write(smc);
+ break;
+ case SHUT_RD:
+ if (sk->sk_state == SMC_LISTEN)
+ rc = smc_close_active(smc);
+ else
+ rc = 0;
+ /* nothing more to do because peer is not involved */
+ break;
}
+ rc1 = kernel_sock_shutdown(smc->clcsock, how);
+ /* map sock_shutdown_cmd constants to sk_shutdown value range */
+ sk->sk_shutdown |= how + 1;
out:
release_sock(sk);
-
-out_err:
- return rc;
+ return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,