From 9f0cadc32d738f0f0c8e30be83be7087c7b85ee5 Mon Sep 17 00:00:00 2001
From: Sabrina Dubroca
Date: Thu, 16 Apr 2020 17:45:44 +0200
Subject: xfrm: espintcp: save and call old ->sk_destruct

When ESP encapsulation is enabled on a TCP socket, espintcp replaces
the existing ->sk_destruct callback with espintcp_destruct. We still
need to call the old callback to perform the other cleanups when the
socket is destroyed. Save the old callback, and call it from
espintcp_destruct.

Fixes: e27cca96cd68 ("xfrm: add espintcp (RFC 8229)")
Signed-off-by: Sabrina Dubroca
Signed-off-by: Steffen Klassert
---
 include/net/espintcp.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include/net')

diff --git a/include/net/espintcp.h b/include/net/espintcp.h
index dd7026a00066..0335bbd76552 100644
--- a/include/net/espintcp.h
+++ b/include/net/espintcp.h
@@ -25,6 +25,7 @@ struct espintcp_ctx {
 	struct espintcp_msg partial;
 	void (*saved_data_ready)(struct sock *sk);
 	void (*saved_write_space)(struct sock *sk);
+	void (*saved_destruct)(struct sock *sk);
 	struct work_struct work;
 	bool tx_running;
 };
-- cgit v1.2.3
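The fix follows the usual save-and-chain pattern for replaced socket
callbacks. A minimal sketch of that pattern, assuming a hypothetical
install helper and an espintcp_getctx() accessor (names here are
illustrative, not necessarily the kernel's):

/* Sketch only: chain a replaced ->sk_destruct through the saved pointer. */
static void espintcp_destruct(struct sock *sk)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);	/* assumed accessor */

	ctx->saved_destruct(sk);	/* run the original cleanup first */
	kfree(ctx);
}

static void espintcp_install_ctx(struct sock *sk, struct espintcp_ctx *ctx)
{
	ctx->saved_destruct = sk->sk_destruct;	/* save before overriding */
	sk->sk_destruct = espintcp_destruct;
}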
From 0cada33241d9de205522e3858b18e506ca5cce2c Mon Sep 17 00:00:00 2001
From: Vinay Kumar Yadav
Date: Sat, 23 May 2020 01:40:31 +0530
Subject: net/tls: fix race condition causing kernel panic

tls_sw_recvmsg() and tls_decrypt_done() can be run concurrently.

// tls_sw_recvmsg()
if (atomic_read(&ctx->decrypt_pending))
	crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
	reinit_completion(&ctx->async_wait.completion);

// tls_decrypt_done()
pending = atomic_dec_return(&ctx->decrypt_pending);
if (!pending && READ_ONCE(ctx->async_notify))
	complete(&ctx->async_wait.completion);

Consider the scenario: tls_decrypt_done() has passed the check

	if (!pending && READ_ONCE(ctx->async_notify))

and is about to run complete(), when tls_sw_recvmsg() reads
decrypt_pending == 0 and does reinit_completion(); then
tls_decrypt_done() runs complete(). This sequence of execution results
in a wrong completion. Consequently, the next decrypt request will not
wait for completion, and eventually, on connection close, the crypto
resources are freed with no way left to handle the pending decrypt
response.

This race condition can be avoided by making atomic_read() mutually
exclusive with atomic_dec_return()/complete(). Introduce a spin lock to
ensure that mutual exclusion. The same problem in the tx direction is
addressed the same way.

v1->v2:
- More readable commit message.
- Corrected the lock to fix a new race scenario.
- Removed a barrier which is not needed now.

Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
Signed-off-by: Vinay Kumar Yadav
Reviewed-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 include/net/tls.h |  4 ++++
 net/tls/tls_sw.c  | 33 +++++++++++++++++++++++++++------
 2 files changed, 31 insertions(+), 6 deletions(-)
(limited to 'include/net')

diff --git a/include/net/tls.h b/include/net/tls.h
index bf9eb4823933..18cd4f418464 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -135,6 +135,8 @@ struct tls_sw_context_tx {
 	struct tls_rec *open_rec;
 	struct list_head tx_list;
 	atomic_t encrypt_pending;
+	/* protect crypto_wait with encrypt_pending */
+	spinlock_t encrypt_compl_lock;
 	int async_notify;
 	u8 async_capable:1;
@@ -155,6 +157,8 @@ struct tls_sw_context_rx {
 	u8 async_capable:1;
 	u8 decrypted:1;
 	atomic_t decrypt_pending;
+	/* protect crypto_wait with decrypt_pending */
+	spinlock_t decrypt_compl_lock;
 	bool async_notify;
 };
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 2d399b6c4075..8c2763eb6aae 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -206,10 +206,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
 
 	kfree(aead_req);
 
+	spin_lock_bh(&ctx->decrypt_compl_lock);
 	pending = atomic_dec_return(&ctx->decrypt_pending);
 
-	if (!pending && READ_ONCE(ctx->async_notify))
+	if (!pending && ctx->async_notify)
 		complete(&ctx->async_wait.completion);
+	spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
 
 static int tls_do_decryption(struct sock *sk,
@@ -467,10 +469,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 		ready = true;
 	}
 
+	spin_lock_bh(&ctx->encrypt_compl_lock);
 	pending = atomic_dec_return(&ctx->encrypt_pending);
 
-	if (!pending && READ_ONCE(ctx->async_notify))
+	if (!pending && ctx->async_notify)
 		complete(&ctx->async_wait.completion);
+	spin_unlock_bh(&ctx->encrypt_compl_lock);
 
 	if (!ready)
 		return;
@@ -929,6 +933,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int num_zc = 0;
 	int orig_size;
 	int ret = 0;
+	int pending;
 
 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
 		return -EOPNOTSUPP;
@@ -1095,13 +1100,19 @@ trim_sgl:
 		goto send_end;
 	} else if (num_zc) {
 		/* Wait for pending encryptions to get completed */
-		smp_store_mb(ctx->async_notify, true);
+		spin_lock_bh(&ctx->encrypt_compl_lock);
+		ctx->async_notify = true;
 
-		if (atomic_read(&ctx->encrypt_pending))
+		pending = atomic_read(&ctx->encrypt_pending);
+		spin_unlock_bh(&ctx->encrypt_compl_lock);
+		if (pending)
 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
 		else
 			reinit_completion(&ctx->async_wait.completion);
 
+		/* There can be no concurrent accesses, since we have no
+		 * pending encrypt operations
+		 */
 		WRITE_ONCE(ctx->async_notify, false);
 
 		if (ctx->async_wait.err) {
@@ -1732,6 +1743,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
 	bool is_peek = flags & MSG_PEEK;
 	int num_async = 0;
+	int pending;
 
 	flags |= nonblock;
@@ -1894,8 +1906,11 @@ pick_next_record:
 recv_end:
 	if (num_async) {
 		/* Wait for all previously submitted records to be decrypted */
-		smp_store_mb(ctx->async_notify, true);
-		if (atomic_read(&ctx->decrypt_pending)) {
+		spin_lock_bh(&ctx->decrypt_compl_lock);
+		ctx->async_notify = true;
+		pending = atomic_read(&ctx->decrypt_pending);
+		spin_unlock_bh(&ctx->decrypt_compl_lock);
+		if (pending) {
 			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
 			if (err) {
 				/* one of async decrypt failed */
@@ -1907,6 +1922,10 @@ recv_end:
 		} else {
 			reinit_completion(&ctx->async_wait.completion);
 		}
+
+		/* There can be no concurrent accesses, since we have no
+		 * pending decrypt operations
+		 */
 		WRITE_ONCE(ctx->async_notify, false);
 
 		/* Drain records from the rx_list & copy if required */
@@ -2293,6 +2312,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
 	if (tx) {
 		crypto_init_wait(&sw_ctx_tx->async_wait);
+		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
 		crypto_info = &ctx->crypto_send.info;
 		cctx = &ctx->tx;
 		aead = &sw_ctx_tx->aead_send;
@@ -2301,6 +2321,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		sw_ctx_tx->tx_work.sk = sk;
 	} else {
 		crypto_init_wait(&sw_ctx_rx->async_wait);
+		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
 		crypto_info = &ctx->crypto_recv.info;
 		cctx = &ctx->rx;
 		skb_queue_head_init(&sw_ctx_rx->rx_list);
-- cgit v1.2.3
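The shape of the fix generalizes: the completer must decrement-and-test
under the same lock the waiter uses to set its flag and sample the
counter. A standalone userspace sketch of that pairing (plain pthreads;
the names and the condition-variable setting are illustrative and
simplify the kernel's completion API):

/* Standalone sketch of the waiter/completer pairing; not kernel code. */
#include <pthread.h>

struct compl_ctx {
	pthread_mutex_t lock;	/* plays the role of decrypt_compl_lock */
	pthread_cond_t done;
	int pending;		/* in-flight async operations */
	int notify;		/* waiter has announced it will wait */
};

/* completion-callback side, like tls_decrypt_done() */
static void op_done(struct compl_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	/* decrement and test cannot interleave with the waiter's check */
	if (--c->pending == 0 && c->notify)
		pthread_cond_signal(&c->done);
	pthread_mutex_unlock(&c->lock);
}

/* waiter side, like the recv_end: block in tls_sw_recvmsg() */
static void wait_for_all(struct compl_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->notify = 1;		/* flag set and counter read atomically */
	while (c->pending)
		pthread_cond_wait(&c->done, &c->lock);
	c->notify = 0;
	pthread_mutex_unlock(&c->lock);
}

The kernel cannot hold the spin lock across crypto_wait_req(), which is
why the patch samples the counter under the lock and then waits outside
it.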
From 90f33bffa382598a32cc82abfeb20adc92d041b6 Mon Sep 17 00:00:00 2001
From: Nikolay Aleksandrov
Date: Tue, 26 May 2020 12:56:15 -0600
Subject: nexthops: don't modify published nexthop groups

We must avoid modifying published nexthop groups while they might be in
use, otherwise we might see NULL ptr dereferences. In order to do that
we allocate 2 nexthop group structures upon nexthop creation and swap
between them when we have to delete an entry. The reason is that we
can't fail nexthop group removal, so we can't handle allocation failure
there; thus we move the extra allocation to creation time, where we can
safely fail and return ENOMEM.

Fixes: 430a049190de ("nexthop: Add support for nexthop groups")
Signed-off-by: Nikolay Aleksandrov
Signed-off-by: David Ahern
Signed-off-by: David S. Miller
---
 include/net/nexthop.h |  1 +
 net/ipv4/nexthop.c    | 91 ++++++++++++++++++++++++++++++++-------------------
 2 files changed, 59 insertions(+), 33 deletions(-)
(limited to 'include/net')

diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index c440ccc861fc..8a343519ed7a 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -70,6 +70,7 @@ struct nh_grp_entry {
 };
 
 struct nh_group {
+	struct nh_group		*spare; /* spare group for removals */
 	u16			num_nh;
 	bool			mpath;
 	bool			has_v4;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index b4b772f120a7..563f71bcb2d7 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -63,9 +63,16 @@ static void nexthop_free_mpath(struct nexthop *nh)
 	int i;
 
 	nhg = rcu_dereference_raw(nh->nh_grp);
-	for (i = 0; i < nhg->num_nh; ++i)
-		WARN_ON(nhg->nh_entries[i].nh);
+	for (i = 0; i < nhg->num_nh; ++i) {
+		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+		WARN_ON(!list_empty(&nhge->nh_list));
+		nexthop_put(nhge->nh);
+	}
+
+	WARN_ON(nhg->spare == nhg);
+
+	kfree(nhg->spare);
 	kfree(nhg);
 }
@@ -697,46 +704,53 @@ static void nh_group_rebalance(struct nh_group *nhg)
 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
 				struct nl_info *nlinfo)
 {
+	struct nh_grp_entry *nhges, *new_nhges;
 	struct nexthop *nhp = nhge->nh_parent;
 	struct nexthop *nh = nhge->nh;
-	struct nh_grp_entry *nhges;
-	struct nh_group *nhg;
-	bool found = false;
-	int i;
+	struct nh_group *nhg, *newg;
+	int i, j;
 
 	WARN_ON(!nh);
 
-	list_del(&nhge->nh_list);
-
 	nhg = rtnl_dereference(nhp->nh_grp);
-	nhges = nhg->nh_entries;
-	for (i = 0; i < nhg->num_nh; ++i) {
-		if (found) {
-			nhges[i-1].nh = nhges[i].nh;
-			nhges[i-1].weight = nhges[i].weight;
-			list_del(&nhges[i].nh_list);
-			list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
-		} else if (nhg->nh_entries[i].nh == nh) {
-			found = true;
-		}
-	}
+	newg = nhg->spare;
 
-	if (WARN_ON(!found))
+	/* last entry, keep it visible and remove the parent */
+	if (nhg->num_nh == 1) {
+		remove_nexthop(net, nhp, nlinfo);
 		return;
+	}
 
-	nhg->num_nh--;
-	nhg->nh_entries[nhg->num_nh].nh = NULL;
+	newg->has_v4 = nhg->has_v4;
+	newg->mpath = nhg->mpath;
+	newg->num_nh = nhg->num_nh;
 
-	nh_group_rebalance(nhg);
+	/* copy old entries to new except the one getting removed */
+	nhges = nhg->nh_entries;
+	new_nhges = newg->nh_entries;
+	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
+		/* current nexthop getting removed */
+		if (nhg->nh_entries[i].nh == nh) {
+			newg->num_nh--;
+			continue;
+		}
 
-	nexthop_put(nh);
+		list_del(&nhges[i].nh_list);
+		new_nhges[j].nh_parent = nhges[i].nh_parent;
+		new_nhges[j].nh = nhges[i].nh;
+		new_nhges[j].weight = nhges[i].weight;
+		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
+		j++;
+	}
+
+	nh_group_rebalance(newg);
+	rcu_assign_pointer(nhp->nh_grp, newg);
+
+	list_del(&nhge->nh_list);
+	nexthop_put(nhge->nh);
 
 	if (nlinfo)
 		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
-
-	/* if this group has no more entries then remove it */
-	if (!nhg->num_nh)
-		remove_nexthop(net, nhp, nlinfo);
 }
 
 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
@@ -746,6 +760,9 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
 
 	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
 		remove_nh_grp_entry(net, nhge, nlinfo);
+
+	/* make sure all see the newly published array before releasing rtnl */
+	synchronize_rcu();
 }
 
 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
@@ -759,10 +776,7 @@ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
 		if (WARN_ON(!nhge->nh))
 			continue;
 
-		list_del(&nhge->nh_list);
-		nexthop_put(nhge->nh);
-		nhge->nh = NULL;
-		nhg->num_nh--;
+		list_del_init(&nhge->nh_list);
 	}
 }
@@ -1085,6 +1099,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
 {
 	struct nlattr *grps_attr = cfg->nh_grp;
 	struct nexthop_grp *entry = nla_data(grps_attr);
+	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
 	struct nh_group *nhg;
 	struct nexthop *nh;
 	int i;
@@ -1095,12 +1110,21 @@ static struct nexthop *nexthop_create_group(struct net *net,
 
 	nh->is_group = 1;
 
-	nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
+	nhg = nexthop_grp_alloc(num_nh);
 	if (!nhg) {
 		kfree(nh);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/* spare group used for removals */
+	nhg->spare = nexthop_grp_alloc(num_nh);
+	if (!nhg->spare) {
+		kfree(nhg);
+		kfree(nh);
+		return ERR_PTR(-ENOMEM);
+	}
+	nhg->spare->spare = nhg;
+
 	for (i = 0; i < nhg->num_nh; ++i) {
 		struct nexthop *nhe;
 		struct nh_info *nhi;
@@ -1132,6 +1156,7 @@ out_no_nh:
 	for (; i >= 0; --i)
 		nexthop_put(nhg->nh_entries[i].nh);
 
+	kfree(nhg->spare);
 	kfree(nhg);
 	kfree(nh);
-- cgit v1.2.3
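Structurally, this is RCU copy-and-publish with a preallocated spare:
the writer builds the new array in the spare while readers keep using
the published one, then swaps the pointer atomically. A condensed
kernel-style sketch of the shape (the names and fixed-size payload are
illustrative; in the patch the spare is sized at creation, which is
what makes removal failure-free):

/* Kernel-style sketch of the spare-group swap; not the nexthop code. */
struct grp {
	struct grp *spare;	/* same capacity, allocated at creation */
	int num;
	int vals[8];		/* stand-in for nh_entries[] */
};

static struct grp __rcu *active;	/* writers serialized by RTNL */

static void grp_remove_val(int unwanted)
{
	struct grp *oldg = rtnl_dereference(active);
	struct grp *newg = oldg->spare;
	int i, j;

	/* build the new version in the spare; oldg stays intact for readers */
	for (i = 0, j = 0; i < oldg->num; i++) {
		if (oldg->vals[i] == unwanted)
			continue;
		newg->vals[j++] = oldg->vals[i];
	}
	newg->num = j;
	newg->spare = oldg;		/* the old copy becomes the next spare */

	rcu_assign_pointer(active, newg);	/* publish atomically */
	synchronize_rcu();			/* wait out readers of oldg */
}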
From 0b5e2e39739e861fa5fc84ab27a35dbe62a15330 Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Tue, 26 May 2020 12:56:16 -0600
Subject: nexthop: Expand nexthop_is_multipath in a few places

I got too fancy consolidating checks on multipath type. The result is
that path lookups can access 2 different nh_grp structs, as exposed by
Nik's torture tests.

Expand nexthop_is_multipath within nexthop.h to avoid multiple nh_grp
dereferences and make decisions based on the consistent struct. The
only 2 places left using nexthop_is_multipath are within IPv6; both
only check that the nexthop is a multipath for a branching decision,
which is acceptable.

Fixes: 430a049190de ("nexthop: Add support for nexthop groups")
Signed-off-by: David Ahern
Acked-by: Nikolay Aleksandrov
Signed-off-by: David S. Miller
---
 include/net/nexthop.h | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)
(limited to 'include/net')

diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 8a343519ed7a..f09e8d7d9886 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -137,21 +137,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 {
 	unsigned int rc = 1;
 
-	if (nexthop_is_multipath(nh)) {
+	if (nh->is_group) {
 		struct nh_group *nh_grp;
 
 		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
-		rc = nh_grp->num_nh;
+		if (nh_grp->mpath)
+			rc = nh_grp->num_nh;
 	}
 
 	return rc;
 }
 
 static inline
-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
+struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
 {
-	const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
-
 	/* for_nexthops macros in fib_semantics.c grabs a pointer to
 	 * the nexthop before checking nhsel
 	 */
@@ -186,12 +185,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh)
 {
 	const struct nh_info *nhi;
 
-	if (nexthop_is_multipath(nh)) {
-		if (nexthop_num_path(nh) > 1)
-			return false;
-		nh = nexthop_mpath_select(nh, 0);
-		if (!nh)
+	if (nh->is_group) {
+		struct nh_group *nh_grp;
+
+		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+		if (nh_grp->num_nh > 1)
 			return false;
+
+		nh = nh_grp->nh_entries[0].nh;
 	}
 
 	nhi = rcu_dereference_rtnl(nh->nh_info);
@@ -217,10 +218,15 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
 	BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
 	BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
 
-	if (nexthop_is_multipath(nh)) {
-		nh = nexthop_mpath_select(nh, nhsel);
-		if (!nh)
-			return NULL;
+	if (nh->is_group) {
+		struct nh_group *nh_grp;
+
+		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+		if (nh_grp->mpath) {
+			nh = nexthop_mpath_select(nh_grp, nhsel);
+			if (!nh)
+				return NULL;
+		}
 	}
 
 	nhi = rcu_dereference_rtnl(nh->nh_info);
@@ -264,8 +270,11 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
 {
 	struct nh_info *nhi;
 
-	if (nexthop_is_multipath(nh)) {
-		nh = nexthop_mpath_select(nh, 0);
+	if (nh->is_group) {
+		struct nh_group *nh_grp;
+
+		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+		nh = nexthop_mpath_select(nh_grp, 0);
 		if (!nh)
 			return NULL;
 	}
-- cgit v1.2.3
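The underlying rule: two rcu_dereference_rtnl() calls on the same
pointer may return two different objects if a writer publishes a new
one in between, so every decision has to be made against a single
snapshot. A simplified before/after fragment, condensed from the hunks
above:

/* Before: each helper re-dereferences nh->nh_grp, so the two calls can
 * observe two different nh_group objects.
 */
if (nexthop_is_multipath(nh))			/* dereference #1 */
	nh = nexthop_mpath_select(nh, nhsel);	/* dereference #2 */

/* After: dereference once, then decide everything on that snapshot. */
if (nh->is_group) {
	struct nh_group *nh_grp = rcu_dereference_rtnl(nh->nh_grp);

	if (nh_grp->mpath)
		nh = nexthop_mpath_select(nh_grp, nhsel);
}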
From af7888ad9edbd8ba7f6449d1c27ce281ad4b26fd Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Tue, 26 May 2020 12:56:17 -0600
Subject: ipv4: Refactor nhc evaluation in fib_table_lookup

FIB lookups can return an entry that references an external nexthop.
While walking the nexthop struct we do not want to make multiple calls
into the nexthop code, which can result in 2 different nh_grp structs
getting accessed: one call returning the number of paths and the rest
of the loop seeing a different struct. If the nexthop group shrunk, the
result is an attempt to access a fib_nh_common that does not exist for
the new nh_grp struct but did for the old one.

To fix that, move the device evaluation code to a helper that can be
used for the inline fib_nh path as well as external nexthops. Update
the existing check for fi->nh in fib_table_lookup to call a new helper,
nexthop_get_nhc_lookup, which walks the external nexthop with a single
rcu dereference.

Fixes: 430a049190de ("nexthop: Add support for nexthop groups")
Signed-off-by: David Ahern
Acked-by: Nikolay Aleksandrov
Signed-off-by: David S. Miller
---
 include/net/ip_fib.h  |  2 ++
 include/net/nexthop.h | 33 +++++++++++++++++++++++++++++++++
 net/ipv4/fib_trie.c   | 51 ++++++++++++++++++++++++++++++++++++---------------
 3 files changed, 71 insertions(+), 15 deletions(-)
(limited to 'include/net')

diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index b219a8fe0950..771ce068bc96 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -479,6 +479,8 @@ void fib_nh_common_release(struct fib_nh_common *nhc);
 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
 void fib_trie_init(void);
 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+			 const struct flowi4 *flp);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index f09e8d7d9886..9414ae46fc1c 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -233,6 +233,39 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
 	return &nhi->fib_nhc;
 }
 
+/* called from fib_table_lookup with rcu_lock */
+static inline
+struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
+					     int fib_flags,
+					     const struct flowi4 *flp,
+					     int *nhsel)
+{
+	struct nh_info *nhi;
+
+	if (nh->is_group) {
+		struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+		int i;
+
+		for (i = 0; i < nhg->num_nh; i++) {
+			struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+			nhi = rcu_dereference(nhe->nh_info);
+			if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+				*nhsel = i;
+				return &nhi->fib_nhc;
+			}
+		}
+	} else {
+		nhi = rcu_dereference(nh->nh_info);
+		if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+			*nhsel = 0;
+			return &nhi->fib_nhc;
+		}
+	}
+
+	return NULL;
+}
+
 static inline unsigned int fib_info_num_path(const struct fib_info *fi)
 {
 	if (unlikely(fi->nh))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4f334b425538..248f1c1959a6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1371,6 +1371,26 @@ static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
 	return (key ^ prefix) & (prefix | -prefix);
 }
 
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+			 const struct flowi4 *flp)
+{
+	if (nhc->nhc_flags & RTNH_F_DEAD)
+		return false;
+
+	if (ip_ignore_linkdown(nhc->nhc_dev) &&
+	    nhc->nhc_flags & RTNH_F_LINKDOWN &&
+	    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
+		return false;
+
+	if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
+		if (flp->flowi4_oif &&
+		    flp->flowi4_oif != nhc->nhc_oif)
+			return false;
+	}
+
+	return true;
+}
+
 /* should be called with rcu_read_lock */
 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 		     struct fib_result *res, int fib_flags)
@@ -1503,6 +1523,7 @@ found:
 	/* Step 3: Process the leaf, if that fails fall back to backtracing */
 	hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
 		struct fib_info *fi = fa->fa_info;
+		struct fib_nh_common *nhc;
 		int nhsel, err;
 
 		if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
@@ -1528,26 +1549,25 @@ out_reject:
 		if (fi->fib_flags & RTNH_F_DEAD)
 			continue;
 
-		if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) {
-			err = fib_props[RTN_BLACKHOLE].error;
-			goto out_reject;
+		if (unlikely(fi->nh)) {
+			if (nexthop_is_blackhole(fi->nh)) {
+				err = fib_props[RTN_BLACKHOLE].error;
+				goto out_reject;
+			}
+
+			nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
+						     &nhsel);
+			if (nhc)
+				goto set_result;
+			goto miss;
 		}
 
 		for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
-			struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+			nhc = fib_info_nhc(fi, nhsel);
 
-			if (nhc->nhc_flags & RTNH_F_DEAD)
+			if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
 				continue;
-			if (ip_ignore_linkdown(nhc->nhc_dev) &&
-			    nhc->nhc_flags & RTNH_F_LINKDOWN &&
-			    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
-				continue;
-			if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
-				if (flp->flowi4_oif &&
-				    flp->flowi4_oif != nhc->nhc_oif)
-					continue;
-			}
-
+set_result:
 			if (!(fib_flags & FIB_LOOKUP_NOREF))
 				refcount_inc(&fi->fib_clntref);
 
@@ -1568,6 +1588,7 @@ out_reject:
 			return err;
 		}
 	}
+miss:
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 	this_cpu_inc(stats->semantic_match_miss);
 #endif
-- cgit v1.2.3
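Put together, the leaf-processing flow after this patch reads roughly
as below, condensed from the hunks above with the reference counting
and label plumbing elided:

/* Condensed control flow of the fixed fib_table_lookup() leaf step. */
struct fib_nh_common *nhc = NULL;
int nhsel;

if (unlikely(fi->nh)) {
	if (nexthop_is_blackhole(fi->nh))
		return fib_props[RTN_BLACKHOLE].error;

	/* one rcu_dereference of the group, inside the helper */
	nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp, &nhsel);
	if (!nhc)
		goto miss;	/* count a semantic match miss */
} else {
	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		nhc = fib_info_nhc(fi, nhsel);
		if (fib_lookup_good_nhc(nhc, fib_flags, flp))
			break;	/* first usable path wins */
	}
}
/* set_result: fill res from nhc ... */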
From 1fd1c768f3624a5e66766e7b4ddb9b607cd834a5 Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Tue, 26 May 2020 12:56:18 -0600
Subject: ipv4: nexthop version of fib_info_nh_uses_dev

Similar to the last patch, fib_info_nh_uses_dev needs fixing for
external nexthops to avoid referencing multiple nh_grp structs. Move
the device check in fib_info_nh_uses_dev to a helper and create a
nexthop version that is called if the fib_info uses an external
nexthop.

Fixes: 430a049190de ("nexthop: Add support for nexthop groups")
Signed-off-by: David Ahern
Acked-by: Nikolay Aleksandrov
Signed-off-by: David S. Miller
---
 include/net/ip_fib.h    | 10 ++++++++++
 include/net/nexthop.h   | 25 +++++++++++++++++++++++++
 net/ipv4/fib_frontend.c | 19 ++++++++++---------
 3 files changed, 45 insertions(+), 9 deletions(-)
(limited to 'include/net')

diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 771ce068bc96..2ec062aaa978 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -447,6 +447,16 @@ static inline int fib_num_tclassid_users(struct net *net)
 #endif
 int fib_unmerge(struct net *net);
 
+static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
+					  const struct net_device *dev)
+{
+	if (nhc->nhc_dev == dev ||
+	    l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
+		return true;
+
+	return false;
+}
+
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 9414ae46fc1c..8c9f1a718859 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -266,6 +266,31 @@ struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
 	return NULL;
 }
 
+static inline bool nexthop_uses_dev(const struct nexthop *nh,
+				    const struct net_device *dev)
+{
+	struct nh_info *nhi;
+
+	if (nh->is_group) {
+		struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+		int i;
+
+		for (i = 0; i < nhg->num_nh; i++) {
+			struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+			nhi = rcu_dereference(nhe->nh_info);
+			if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+				return true;
+		}
+	} else {
+		nhi = rcu_dereference(nh->nh_info);
+		if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+			return true;
+	}
+
+	return false;
+}
+
 static inline unsigned int fib_info_num_path(const struct fib_info *fi)
 {
 	if (unlikely(fi->nh))
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1bf9da3a75f9..41079490a118 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -309,17 +309,18 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
 {
 	bool dev_match = false;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	int ret;
+	if (unlikely(fi->nh)) {
+		dev_match = nexthop_uses_dev(fi->nh, dev);
+	} else {
+		int ret;
 
-	for (ret = 0; ret < fib_info_num_path(fi); ret++) {
-		const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
+		for (ret = 0; ret < fib_info_num_path(fi); ret++) {
+			const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
 
-		if (nhc->nhc_dev == dev) {
-			dev_match = true;
-			break;
-		} else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) {
-			dev_match = true;
-			break;
+			if (nhc_l3mdev_matches_dev(nhc, dev)) {
+				dev_match = true;
+				break;
+			}
 		}
 	}
 #else
-- cgit v1.2.3
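After this patch the multipath variant of the function reduces to the
shape below, condensed from the hunk above (the early return stands in
for the patch's else-branch assignment):

/* Condensed result of the patch above (CONFIG_IP_ROUTE_MULTIPATH only). */
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
{
	bool dev_match = false;
	int nhsel;

	if (unlikely(fi->nh))
		return nexthop_uses_dev(fi->nh, dev);	/* one rcu deref inside */

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		const struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);

		if (nhc_l3mdev_matches_dev(nhc, dev)) {
			dev_match = true;
			break;
		}
	}

	return dev_match;
}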