author     Jakub Kicinski <kuba@kernel.org>   2020-10-08 15:44:50 -0700
committer  Jakub Kicinski <kuba@kernel.org>   2020-10-08 15:44:50 -0700
commit     9d49aea13f1e35869158abe7e314e16dc8f50ff1 (patch)
tree       c878d774db35fdb9834621e2f354973dc09f2702 /net
parent     9faebeb2d80065926dfbc09cb73b1bb7779a89cd (diff)
parent     3fdd47c3b40ac48e6e6e5904cf24d12e6e073a96 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Small conflict around locking in rxrpc_process_event() - channel_lock
moved to bundle in next, while state lock needs _bh() from net.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
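For reference, a minimal sketch of the resolved locking order, matching the
net/rxrpc/conn_event.c hunk below: the channel lock now hangs off conn->bundle
(from net-next), while the state lock takes the _bh variants (from net); the
per-channel rxrpc_call_is_secure() loop is elided here.

	spin_lock(&conn->bundle->channel_lock);
	spin_lock_bh(&conn->state_lock);
	if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
		conn->state = RXRPC_CONN_SERVICE;
		spin_unlock_bh(&conn->state_lock);
		/* ... rxrpc_call_is_secure() on each channel's call ... */
	} else {
		spin_unlock_bh(&conn->state_lock);
	}
	spin_unlock(&conn->bundle->channel_lock);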
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netlink.c      |  26
-rw-r--r--  net/core/filter.c            |   6
-rw-r--r--  net/ipv4/tcp_ipv4.c          |   6
-rw-r--r--  net/mptcp/options.c          |  10
-rw-r--r--  net/mptcp/subflow.c          |   2
-rw-r--r--  net/openvswitch/conntrack.c  |  22
-rw-r--r--  net/qrtr/ns.c                |  76
-rw-r--r--  net/rxrpc/ar-internal.h      |   7
-rw-r--r--  net/rxrpc/call_accept.c      | 263
-rw-r--r--  net/rxrpc/call_object.c      |   5
-rw-r--r--  net/rxrpc/conn_event.c       |   8
-rw-r--r--  net/rxrpc/key.c              |  20
-rw-r--r--  net/rxrpc/recvmsg.c          |  36
-rw-r--r--  net/rxrpc/sendmsg.c          |  15
-rw-r--r--  net/sctp/auth.c              |   1
-rw-r--r--  net/wireless/nl80211.c       |   3
16 files changed, 172 insertions(+), 334 deletions(-)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 8a71c60fa357..92d64abffa87 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -380,6 +380,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
u32 filter_mask, const struct net_device *dev)
{
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+ struct nlattr *af = NULL;
struct net_bridge *br;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
@@ -423,11 +424,18 @@ static int br_fill_ifinfo(struct sk_buff *skb,
nla_nest_end(skb, nest);
}
+ if (filter_mask & (RTEXT_FILTER_BRVLAN |
+ RTEXT_FILTER_BRVLAN_COMPRESSED |
+ RTEXT_FILTER_MRP)) {
+ af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
+ if (!af)
+ goto nla_put_failure;
+ }
+
/* Check if the VID information is requested */
if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
struct net_bridge_vlan_group *vg;
- struct nlattr *af;
int err;
/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
@@ -441,11 +449,6 @@ static int br_fill_ifinfo(struct sk_buff *skb,
rcu_read_unlock();
goto done;
}
- af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
- if (!af) {
- rcu_read_unlock();
- goto nla_put_failure;
- }
if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
err = br_fill_ifvlaninfo_compressed(skb, vg);
else
@@ -456,32 +459,25 @@ static int br_fill_ifinfo(struct sk_buff *skb,
rcu_read_unlock();
if (err)
goto nla_put_failure;
-
- nla_nest_end(skb, af);
}
if (filter_mask & RTEXT_FILTER_MRP) {
- struct nlattr *af;
int err;
if (!br_mrp_enabled(br) || port)
goto done;
- af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
- if (!af)
- goto nla_put_failure;
-
rcu_read_lock();
err = br_mrp_fill_info(skb, br);
rcu_read_unlock();
if (err)
goto nla_put_failure;
-
- nla_nest_end(skb, af);
}
done:
+ if (af)
+ nla_nest_end(skb, af);
nlmsg_end(skb, nlh);
return 0;
diff --git a/net/core/filter.c b/net/core/filter.c
index 3fb6adad1957..bc6bd2b323e8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10203,6 +10203,12 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
{
+ /* BTF types for tcp_timewait_sock and inet_timewait_sock are not
+ * generated if CONFIG_INET=n. Trigger an explicit generation here.
+ */
+ BTF_TYPE_EMIT(struct inet_timewait_sock);
+ BTF_TYPE_EMIT(struct tcp_timewait_sock);
+
#ifdef CONFIG_INET
if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
return (unsigned long)sk;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ace48b2790ff..7352c097ae48 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1798,12 +1798,12 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
__skb_pull(skb, hdrlen);
if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
- thtail->window = th->window;
-
TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
- if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
+ if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+ thtail->window = th->window;
+ }
/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
* thtail->fin, so that the fast path in tcp_rcv_established()
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 4055769e4fde..4d35fa998fcc 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -452,7 +452,10 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
struct sk_buff *skb, struct mptcp_ext *ext)
{
- u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq);
+ /* The write_seq value has already been incremented, so the actual
+ * sequence number for the DATA_FIN is one less.
+ */
+ u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;
if (!ext->use_map || !skb->len) {
/* RFC6824 requires a DSS mapping with specific values
@@ -461,10 +464,7 @@ static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
ext->data_fin = 1;
ext->use_map = 1;
ext->dsn64 = 1;
- /* The write_seq value has already been incremented, so
- * the actual sequence number for the DATA_FIN is one less.
- */
- ext->data_seq = data_fin_tx_seq - 1;
+ ext->data_seq = data_fin_tx_seq;
ext->subflow_seq = 0;
ext->data_len = 1;
} else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 5ca8032e0d24..5d91e3a2cd30 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -750,7 +750,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
return MAPPING_DATA_FIN;
}
} else {
- u64 data_fin_seq = mpext->data_seq + data_len;
+ u64 data_fin_seq = mpext->data_seq + data_len - 1;
/* If mpext->data_seq is a 32-bit value, data_fin_seq
* must also be limited to 32 bits.
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e6fe26a9c892..4beb96139d77 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -905,15 +905,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
}
err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
- if (err == NF_ACCEPT &&
- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
- if (maniptype == NF_NAT_MANIP_SRC)
- maniptype = NF_NAT_MANIP_DST;
- else
- maniptype = NF_NAT_MANIP_SRC;
-
- err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
- maniptype);
+ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+ if (ct->status & IPS_SRC_NAT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ maniptype = NF_NAT_MANIP_DST;
+ else
+ maniptype = NF_NAT_MANIP_SRC;
+
+ err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
+ maniptype);
+ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
+ NF_NAT_MANIP_SRC);
+ }
}
/* Mark NAT done if successful and update the flow key. */
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 934999b56d60..b8559c882431 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -193,7 +193,7 @@ static int announce_servers(struct sockaddr_qrtr *sq)
struct qrtr_server *srv;
struct qrtr_node *node;
void __rcu **slot;
- int ret = 0;
+ int ret;
node = node_get(qrtr_ns.local_node);
if (!node)
@@ -203,18 +203,27 @@ static int announce_servers(struct sockaddr_qrtr *sq)
/* Announce the list of servers registered in this node */
radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
srv = radix_tree_deref_slot(slot);
+ if (!srv)
+ continue;
+ if (radix_tree_deref_retry(srv)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ slot = radix_tree_iter_resume(slot, &iter);
+ rcu_read_unlock();
ret = service_announce_new(sq, srv);
if (ret < 0) {
pr_err("failed to announce new service\n");
- goto err_out;
+ return ret;
}
+
+ rcu_read_lock();
}
-err_out:
rcu_read_unlock();
- return ret;
+ return 0;
}
static struct qrtr_server *server_add(unsigned int service,
@@ -339,7 +348,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
struct qrtr_node *node;
void __rcu **slot;
struct kvec iv;
- int ret = 0;
+ int ret;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
@@ -352,7 +361,16 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
/* Advertise removal of this client to all servers of remote node */
radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
srv = radix_tree_deref_slot(slot);
+ if (!srv)
+ continue;
+ if (radix_tree_deref_retry(srv)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ slot = radix_tree_iter_resume(slot, &iter);
+ rcu_read_unlock();
server_del(node, srv->port);
+ rcu_read_lock();
}
rcu_read_unlock();
@@ -368,6 +386,14 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
rcu_read_lock();
radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
srv = radix_tree_deref_slot(slot);
+ if (!srv)
+ continue;
+ if (radix_tree_deref_retry(srv)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ slot = radix_tree_iter_resume(slot, &iter);
+ rcu_read_unlock();
sq.sq_family = AF_QIPCRTR;
sq.sq_node = srv->node;
@@ -379,14 +405,14 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0) {
pr_err("failed to send bye cmd\n");
- goto err_out;
+ return ret;
}
+ rcu_read_lock();
}
-err_out:
rcu_read_unlock();
- return ret;
+ return 0;
}
static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
@@ -404,7 +430,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
struct list_head *li;
void __rcu **slot;
struct kvec iv;
- int ret = 0;
+ int ret;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
@@ -447,6 +473,14 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
rcu_read_lock();
radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
srv = radix_tree_deref_slot(slot);
+ if (!srv)
+ continue;
+ if (radix_tree_deref_retry(srv)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ slot = radix_tree_iter_resume(slot, &iter);
+ rcu_read_unlock();
sq.sq_family = AF_QIPCRTR;
sq.sq_node = srv->node;
@@ -458,14 +492,14 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0) {
pr_err("failed to send del client cmd\n");
- goto err_out;
+ return ret;
}
+ rcu_read_lock();
}
-err_out:
rcu_read_unlock();
- return ret;
+ return 0;
}
static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
@@ -571,16 +605,34 @@ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
rcu_read_lock();
radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) {
node = radix_tree_deref_slot(node_slot);
+ if (!node)
+ continue;
+ if (radix_tree_deref_retry(node)) {
+ node_slot = radix_tree_iter_retry(&node_iter);
+ continue;
+ }
+ node_slot = radix_tree_iter_resume(node_slot, &node_iter);
radix_tree_for_each_slot(srv_slot, &node->servers,
&srv_iter, 0) {
struct qrtr_server *srv;
srv = radix_tree_deref_slot(srv_slot);
+ if (!srv)
+ continue;
+ if (radix_tree_deref_retry(srv)) {
+ srv_slot = radix_tree_iter_retry(&srv_iter);
+ continue;
+ }
+
if (!server_match(srv, &filter))
continue;
+ srv_slot = radix_tree_iter_resume(srv_slot, &srv_iter);
+
+ rcu_read_unlock();
lookup_notify(from, srv, true);
+ rcu_read_lock();
}
}
rcu_read_unlock();
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 19f714386654..c9287b6551df 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -514,7 +514,6 @@ enum rxrpc_call_state {
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
- RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
@@ -710,8 +709,8 @@ struct rxrpc_ack_summary {
enum rxrpc_command {
RXRPC_CMD_SEND_DATA, /* send data message */
RXRPC_CMD_SEND_ABORT, /* request abort generation */
- RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
+ RXRPC_CMD_CHARGE_ACCEPT, /* [server] charge accept preallocation */
};
struct rxrpc_call_params {
@@ -752,9 +751,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
struct rxrpc_sock *,
struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
- rxrpc_notify_rx_t);
-int rxrpc_reject_call(struct rxrpc_sock *);
+int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
/*
* call_event.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index ef160566aa9a..8df1964db333 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -39,8 +39,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
unsigned int debug_id)
{
const void *here = __builtin_return_address(0);
- struct rxrpc_call *call;
+ struct rxrpc_call *call, *xcall;
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+ struct rb_node *parent, **pp;
int max, tmp;
unsigned int size = RXRPC_BACKLOG_MAX;
unsigned int head, tail, call_head, call_tail;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
}
/* Now it gets complicated, because calls get registered with the
- * socket here, particularly if a user ID is preassigned by the user.
+ * socket here, with a user ID preassigned by the user.
*/
call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call)
@@ -107,34 +108,33 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
here, (const void *)user_call_ID);
write_lock(&rx->call_lock);
- if (user_attach_call) {
- struct rxrpc_call *xcall;
- struct rb_node *parent, **pp;
-
- /* Check the user ID isn't already in use */
- pp = &rx->calls.rb_node;
- parent = NULL;
- while (*pp) {
- parent = *pp;
- xcall = rb_entry(parent, struct rxrpc_call, sock_node);
- if (user_call_ID < xcall->user_call_ID)
- pp = &(*pp)->rb_left;
- else if (user_call_ID > xcall->user_call_ID)
- pp = &(*pp)->rb_right;
- else
- goto id_in_use;
- }
- call->user_call_ID = user_call_ID;
- call->notify_rx = notify_rx;
+ /* Check the user ID isn't already in use */
+ pp = &rx->calls.rb_node;
+ parent = NULL;
+ while (*pp) {
+ parent = *pp;
+ xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+ if (user_call_ID < xcall->user_call_ID)
+ pp = &(*pp)->rb_left;
+ else if (user_call_ID > xcall->user_call_ID)
+ pp = &(*pp)->rb_right;
+ else
+ goto id_in_use;
+ }
+
+ call->user_call_ID = user_call_ID;
+ call->notify_rx = notify_rx;
+ if (user_attach_call) {
rxrpc_get_call(call, rxrpc_call_got_kernel);
user_attach_call(call, user_call_ID);
- rxrpc_get_call(call, rxrpc_call_got_userid);
- rb_link_node(&call->sock_node, parent, pp);
- rb_insert_color(&call->sock_node, &rx->calls);
- set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
}
+ rxrpc_get_call(call, rxrpc_call_got_userid);
+ rb_link_node(&call->sock_node, parent, pp);
+ rb_insert_color(&call->sock_node, &rx->calls);
+ set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+
list_add(&call->sock_link, &rx->sock_calls);
write_unlock(&rx->call_lock);
@@ -157,11 +157,8 @@ id_in_use:
}
/*
- * Preallocate sufficient service connections, calls and peers to cover the
- * entire backlog of a socket. When a new call comes in, if we don't have
- * sufficient of each available, the call gets rejected as busy or ignored.
- *
- * The backlog is replenished when a connection is accepted or rejected.
+ * Allocate the preallocation buffers for incoming service calls. These must
+ * be charged manually.
*/
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
@@ -174,13 +171,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
rx->backlog = b;
}
- if (rx->discard_new_call)
- return 0;
-
- while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
- atomic_inc_return(&rxrpc_debug_id)) == 0)
- ;
-
return 0;
}
@@ -333,6 +323,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
rxrpc_see_call(call);
call->conn = conn;
call->security = conn->security;
+ call->security_ix = conn->security_ix;
call->peer = rxrpc_get_peer(conn->params.peer);
call->cong_cwnd = call->peer->cong_cwnd;
return call;
@@ -402,8 +393,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
if (rx->notify_new_call)
rx->notify_new_call(&rx->sk, call, call->user_call_ID);
- else
- sk_acceptq_added(&rx->sk);
spin_lock(&conn->state_lock);
switch (conn->state) {
@@ -415,12 +404,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
case RXRPC_CONN_SERVICE:
write_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE) {
- if (rx->discard_new_call)
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
- else
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
- }
+ if (call->state < RXRPC_CALL_COMPLETE)
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
write_unlock(&call->state_lock);
break;
@@ -440,9 +425,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
rxrpc_send_ping(call, skb);
- if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
- rxrpc_notify_socket(call);
-
/* We have to discard the prealloc queue's ref here and rely on a
* combination of the RCU read lock and refs held either by the socket
* (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
@@ -460,187 +442,18 @@ no_call:
}
/*
- * handle acceptance of a call by userspace
- * - assign the user call ID to the call at the front of the queue
- * - called with the socket locked.
+ * Charge up socket with preallocated calls, attaching user call IDs.
*/
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
- unsigned long user_call_ID,
- rxrpc_notify_rx_t notify_rx)
- __releases(&rx->sk.sk_lock.slock)
- __acquires(call->user_mutex)
+int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
- struct rxrpc_call *call;
- struct rb_node *parent, **pp;
- int ret;
-
- _enter(",%lx", user_call_ID);
-
- ASSERT(!irqs_disabled());
-
- write_lock(&rx->call_lock);
-
- if (list_empty(&rx->to_be_accepted)) {
- write_unlock(&rx->call_lock);
- release_sock(&rx->sk);
- kleave(" = -ENODATA [empty]");
- return ERR_PTR(-ENODATA);
- }
-
- /* check the user ID isn't already in use */
- pp = &rx->calls.rb_node;
- parent = NULL;
- while (*pp) {
- parent = *pp;
- call = rb_entry(parent, struct rxrpc_call, sock_node);
-
- if (user_call_ID < call->user_call_ID)
- pp = &(*pp)->rb_left;
- else if (user_call_ID > call->user_call_ID)
- pp = &(*pp)->rb_right;
- else
- goto id_in_use;
- }
-
- /* Dequeue the first call and check it's still valid. We gain
- * responsibility for the queue's reference.
- */
- call = list_entry(rx->to_be_accepted.next,
- struct rxrpc_call, accept_link);
- write_unlock(&rx->call_lock);
-
- /* We need to gain the mutex from the interrupt handler without
- * upsetting lockdep, so we have to release it there and take it here.
- * We are, however, still holding the socket lock, so other accepts
- * must wait for us and no one can add the user ID behind our backs.
- */
- if (mutex_lock_interruptible(&call->user_mutex) < 0) {
- release_sock(&rx->sk);
- kleave(" = -ERESTARTSYS");
- return ERR_PTR(-ERESTARTSYS);
- }
-
- write_lock(&rx->call_lock);
- list_del_init(&call->accept_link);
- sk_acceptq_removed(&rx->sk);
- rxrpc_see_call(call);
-
- /* Find the user ID insertion point. */
- pp = &rx->calls.rb_node;
- parent = NULL;
- while (*pp) {
- parent = *pp;
- call = rb_entry(parent, struct rxrpc_call, sock_node);
-
- if (user_call_ID < call->user_call_ID)
- pp = &(*pp)->rb_left;
- else if (user_call_ID > call->user_call_ID)
- pp = &(*pp)->rb_right;
- else
- BUG();
- }
-
- write_lock_bh(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_SERVER_ACCEPTING:
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
- break;
- case RXRPC_CALL_COMPLETE:
- ret = call->error;
- goto out_release;
- default:
- BUG();
- }
-
- /* formalise the acceptance */
- call->notify_rx = notify_rx;
- call->user_call_ID = user_call_ID;
- rxrpc_get_call(call, rxrpc_call_got_userid);
- rb_link_node(&call->sock_node, parent, pp);
- rb_insert_color(&call->sock_node, &rx->calls);
- if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
- BUG();
-
- write_unlock_bh(&call->state_lock);
- write_unlock(&rx->call_lock);
- rxrpc_notify_socket(call);
- rxrpc_service_prealloc(rx, GFP_KERNEL);
- release_sock(&rx->sk);
- _leave(" = %p{%d}", call, call->debug_id);
- return call;
-
-out_release:
- _debug("release %p", call);
- write_unlock_bh(&call->state_lock);
- write_unlock(&rx->call_lock);
- rxrpc_release_call(rx, call);
- rxrpc_put_call(call, rxrpc_call_put);
- goto out;
-
-id_in_use:
- ret = -EBADSLT;
- write_unlock(&rx->call_lock);
-out:
- rxrpc_service_prealloc(rx, GFP_KERNEL);
- release_sock(&rx->sk);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
-}
-
-/*
- * Handle rejection of a call by userspace
- * - reject the call at the front of the queue
- */
-int rxrpc_reject_call(struct rxrpc_sock *rx)
-{
- struct rxrpc_call *call;
- bool abort = false;
- int ret;
-
- _enter("");
-
- ASSERT(!irqs_disabled());
-
- write_lock(&rx->call_lock);
-
- if (list_empty(&rx->to_be_accepted)) {
- write_unlock(&rx->call_lock);
- return -ENODATA;
- }
-
- /* Dequeue the first call and check it's still valid. We gain
- * responsibility for the queue's reference.
- */
- call = list_entry(rx->to_be_accepted.next,
- struct rxrpc_call, accept_link);
- list_del_init(&call->accept_link);
- sk_acceptq_removed(&rx->sk);
- rxrpc_see_call(call);
+ struct rxrpc_backlog *b = rx->backlog;
- write_lock_bh(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_SERVER_ACCEPTING:
- __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
- abort = true;
- fallthrough;
- case RXRPC_CALL_COMPLETE:
- ret = call->error;
- goto out_discard;
- default:
- BUG();
- }
+ if (rx->sk.sk_state == RXRPC_CLOSE)
+ return -ESHUTDOWN;
-out_discard:
- write_unlock_bh(&call->state_lock);
- write_unlock(&rx->call_lock);
- if (abort) {
- rxrpc_send_abort_packet(call);
- rxrpc_release_call(rx, call);
- rxrpc_put_call(call, rxrpc_call_put);
- }
- rxrpc_service_prealloc(rx, GFP_KERNEL);
- _leave(" = %d", ret);
- return ret;
+ return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
+ GFP_KERNEL,
+ atomic_inc_return(&rxrpc_debug_id));
}
/*
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index c8015c76a81c..c845594b663f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -23,7 +23,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
[RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
[RXRPC_CALL_SERVER_SECURING] = "SvSecure",
- [RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
@@ -393,9 +392,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
call->call_id = sp->hdr.callNumber;
call->service_id = sp->hdr.serviceId;
call->cid = sp->hdr.cid;
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
- if (sp->hdr.securityIndex > 0)
- call->state = RXRPC_CALL_SERVER_SECURING;
+ call->state = RXRPC_CALL_SERVER_SECURING;
call->cong_tstamp = skb->tstamp;
/* Set the channel for this call. We don't get channel_lock as we're
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 0628dad2bdea..6b7c6f4a82e3 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -270,7 +270,7 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
if (call) {
write_lock_bh(&call->state_lock);
if (call->state == RXRPC_CALL_SERVER_SECURING) {
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
rxrpc_notify_socket(call);
}
write_unlock_bh(&call->state_lock);
@@ -342,18 +342,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return ret;
spin_lock(&conn->bundle->channel_lock);
- spin_lock(&conn->state_lock);
+ spin_lock_bh(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
conn->state = RXRPC_CONN_SERVICE;
- spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&conn->state_lock);
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
rxrpc_call_is_secure(
rcu_dereference_protected(
conn->channels[loop].call,
lockdep_is_held(&conn->bundle->channel_lock)));
} else {
- spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&conn->state_lock);
}
spin_unlock(&conn->bundle->channel_lock);
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 94c3df392651..2e8bd3b97301 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -903,7 +903,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
_enter("");
- if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+ if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities)
return -EINVAL;
description = memdup_sockptr_nul(optval, optlen);
@@ -940,7 +940,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
if (IS_ERR(description))
return PTR_ERR(description);
- key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL);
+ key = request_key(&key_type_keyring, description, NULL);
if (IS_ERR(key)) {
kfree(description);
_leave(" = %ld", PTR_ERR(key));
@@ -1072,7 +1072,7 @@ static long rxrpc_read(const struct key *key,
switch (token->security_index) {
case RXRPC_SECURITY_RXKAD:
- toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin,
+ toksize += 8 * 4; /* viceid, kvno, key*2, begin,
* end, primary, tktlen */
toksize += RND(token->kad->ticket_len);
break;
@@ -1107,7 +1107,8 @@ static long rxrpc_read(const struct key *key,
break;
default: /* we have a ticket we can't encode */
- BUG();
+ pr_err("Unsupported key token type (%u)\n",
+ token->security_index);
continue;
}
@@ -1138,6 +1139,14 @@ static long rxrpc_read(const struct key *key,
memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
xdr += (_l + 3) >> 2; \
} while(0)
+#define ENCODE_BYTES(l, s) \
+ do { \
+ u32 _l = (l); \
+ memcpy(xdr, (s), _l); \
+ if (_l & 3) \
+ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
+ xdr += (_l + 3) >> 2; \
+ } while(0)
#define ENCODE64(x) \
do { \
__be64 y = cpu_to_be64(x); \
@@ -1165,7 +1174,7 @@ static long rxrpc_read(const struct key *key,
case RXRPC_SECURITY_RXKAD:
ENCODE(token->kad->vice_id);
ENCODE(token->kad->kvno);
- ENCODE_DATA(8, token->kad->session_key);
+ ENCODE_BYTES(8, token->kad->session_key);
ENCODE(token->kad->start);
ENCODE(token->kad->expiry);
ENCODE(token->kad->primary_flag);
@@ -1215,7 +1224,6 @@ static long rxrpc_read(const struct key *key,
break;
default:
- BUG();
break;
}
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index c4684dde1f16..2c842851d72e 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -179,37 +179,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
}
/*
- * Pass back notification of a new call. The call is added to the
- * to-be-accepted list. This means that the next call to be accepted might not
- * be the last call seen awaiting acceptance, but unless we leave this on the
- * front of the queue and block all other messages until someone gives us a
- * user_ID for it, there's not a lot we can do.
- */
-static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct msghdr *msg, int flags)
-{
- int tmp = 0, ret;
-
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
-
- if (ret == 0 && !(flags & MSG_PEEK)) {
- _debug("to be accepted");
- write_lock_bh(&rx->recvmsg_lock);
- list_del_init(&call->recvmsg_link);
- write_unlock_bh(&rx->recvmsg_lock);
-
- rxrpc_get_call(call, rxrpc_call_got);
- write_lock(&rx->call_lock);
- list_add_tail(&call->accept_link, &rx->to_be_accepted);
- write_unlock(&rx->call_lock);
- }
-
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
- return ret;
-}
-
-/*
* End the packet reception phase.
*/
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
@@ -630,9 +599,6 @@ try_again:
}
switch (READ_ONCE(call->state)) {
- case RXRPC_CALL_SERVER_ACCEPTING:
- ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
- break;
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
@@ -728,7 +694,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
call->debug_id, rxrpc_call_states[call->state],
iov_iter_count(iter), want_more);
- ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
+ ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
mutex_lock(&call->user_mutex);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 0824e103d037..d27140c836cc 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -530,10 +530,10 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
return -EINVAL;
break;
- case RXRPC_ACCEPT:
+ case RXRPC_CHARGE_ACCEPT:
if (p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
- p->command = RXRPC_CMD_ACCEPT;
+ p->command = RXRPC_CMD_CHARGE_ACCEPT;
if (len != 0)
return -EINVAL;
break;
@@ -659,16 +659,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (ret < 0)
goto error_release_sock;
- if (p.command == RXRPC_CMD_ACCEPT) {
+ if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock;
- call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
- /* The socket is now unlocked. */
- if (IS_ERR(call))
- return PTR_ERR(call);
- ret = 0;
- goto out_put_unlock;
+ ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
+ goto error_release_sock;
}
call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
@@ -690,7 +686,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
case RXRPC_CALL_CLIENT_AWAIT_CONN:
case RXRPC_CALL_SERVER_PREALLOC:
case RXRPC_CALL_SERVER_SECURING:
- case RXRPC_CALL_SERVER_ACCEPTING:
rxrpc_put_call(call, rxrpc_call_put);
ret = -EBUSY;
goto error_release_sock;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 589868d96e3f..6f8319b828b0 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -494,6 +494,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
out_err:
/* Clean up any successful allocations */
sctp_auth_destroy_hmacs(ep->auth_hmacs);
+ ep->auth_hmacs = NULL;
return -ENOMEM;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 59b4677cc587..554796a6c6fe 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4259,6 +4259,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ if (key.idx < 0)
+ return -EINVAL;
+
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);