author		Era Mayflower <mayflowerera@gmail.com>	2020-03-09 19:47:01 +0000
committer	David S. Miller <davem@davemloft.net>	2020-03-16 01:42:31 -0700
commit		a21ecf0e033807b976967286e6c392f48ee2049f
tree		066f5ca68d02329bb11df2ba1e95ed1da63071d8 /drivers/net/macsec.c
parent		65b7a2c8e3691b4b1d5d4b64f3c39dff3670975a
macsec: Support XPN frame handling - IEEE 802.1AEbw
Support handling of frames for the extended packet number (XPN) cipher
suites defined in IEEE 802.1AEbw.
This does not include the needed netlink patches.
* Added xpn boolean field to `struct macsec_secy`.
* Added ssci field to `struct macsec_tx_sa` (802.1AE figure 10-5).
* Added ssci field to `struct macsec_rx_sa` (802.1AE figure 10-5).
* Added salt field to `struct macsec_key` (802.1AE 10.7 NOTE 1).
* Created pn_t type for easy access to the lower and upper halves of the 64-bit PN.
* Created salt_t type for easy access to its "ssci" and "pn" parts (both new types are sketched just below this list).
* Created `macsec_fill_iv_xpn` function to create the IV in XPN mode.
* Added PN recovery and a preliminary replay check in XPN mode.
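The pn_t, salt_t and ssci_t types named above land in include/net/macsec.h, which the diff below (filtered to drivers/net/macsec.c) does not show. The following is only a rough sketch of their shape, inferred from how the driver code uses them; kernel types (u8/u32/u64) and a 12-byte MACSEC_SALT_LEN are assumed, the little-endian field order is shown, and the real header also handles big-endian and may carry sparse annotations:

/* Short Secure Channel Identifier (802.1AEbw) */
typedef u32 ssci_t;

/* 12-byte XPN salt, viewable as raw bytes or as its ssci/pn parts */
typedef union salt {
	struct {
		u32 ssci;
		u64 pn;
	} __packed;
	u8 bytes[MACSEC_SALT_LEN];
} __packed salt_t;

/* 64-bit packet number with direct access to its 32-bit halves */
typedef union pn {
	struct {
		u32 lower;	/* swapped with upper on big-endian */
		u32 upper;
	};
	u64 full64;
} pn_t;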
In addition, according to IEEE 802.1AEbw figure 10-5, the PN of an incoming
frame can be 0 when an XPN cipher suite is used (the SecTAG carries only the
lower 32 bits of the 64-bit PN, which passes through 0 each time the upper
half increments), so the function `macsec_validate_skb` was fixed to reject
PN=0 only when XPN is off.
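A minimal userspace illustration of that point (not part of the patch; the variable names are invented for the example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* A 64-bit XPN whose lower half has just wrapped. */
	uint64_t xpn = UINT64_C(1) << 32;

	/* The SecTAG PN field only carries the lower 32 bits... */
	uint32_t sectag_pn = (uint32_t)xpn;

	/* ...so a perfectly valid XPN frame can carry PN == 0. */
	assert(sectag_pn == 0);
	return 0;
}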
Signed-off-by: Era Mayflower <mayflowerera@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/macsec.c')
-rw-r--r--	drivers/net/macsec.c	130
1 file changed, 94 insertions(+), 36 deletions(-)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 6ec6fc191a6e..6c71e250cccb 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
 #include <net/gro_cells.h>
 #include <net/macsec.h>
 #include <linux/phy.h>
+#include <linux/byteorder/generic.h>
 #include <uapi/linux/if_macsec.h>
@@ -68,6 +69,16 @@ struct macsec_eth_header {
              sc;                                \
              sc = rtnl_dereference(sc->next))
 
+#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
+
+struct gcm_iv_xpn {
+        union {
+                u8 short_secure_channel_id[4];
+                ssci_t ssci;
+        };
+        __be64 pn;
+} __packed;
+
 struct gcm_iv {
         union {
                 u8 secure_channel_id[8];
@@ -372,8 +383,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
         return __macsec_get_ops(macsec->offload, macsec, ctx);
 }
 
-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 {
         struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
         int len = skb->len - 2 * ETH_ALEN;
@@ -398,8 +409,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
         if (h->unused)
                 return false;
 
-        /* rx.pn != 0 (figure 10-5) */
-        if (!h->packet_number)
+        /* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
+        if (!h->packet_number && !xpn)
                 return false;
 
         /* length check, f) g) h) i) */
@@ -411,6 +422,15 @@
 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 
+static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
+                               salt_t salt)
+{
+        struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
+
+        gcm_iv->ssci = ssci ^ salt.ssci;
+        gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
+}
+
 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 {
         struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
@@ -446,14 +466,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 }
 EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 
-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
+                            struct macsec_secy *secy)
 {
-        u32 pn;
+        pn_t pn;
 
         spin_lock_bh(&tx_sa->lock);
-        pn = tx_sa->next_pn;
 
-        tx_sa->next_pn++;
+        pn = tx_sa->next_pn_halves;
+        if (secy->xpn)
+                tx_sa->next_pn++;
+        else
+                tx_sa->next_pn_halves.lower++;
+
         if (tx_sa->next_pn == 0)
                 __macsec_pn_wrapped(secy, tx_sa);
         spin_unlock_bh(&tx_sa->lock);
@@ -568,7 +593,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
         struct macsec_tx_sa *tx_sa;
         struct macsec_dev *macsec = macsec_priv(dev);
         bool sci_present;
-        u32 pn;
+        pn_t pn;
 
         secy = &macsec->secy;
         tx_sc = &secy->tx_sc;
@@ -610,12 +635,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
         memmove(hh, eth, 2 * ETH_ALEN);
 
         pn = tx_sa_update_pn(tx_sa, secy);
-        if (pn == 0) {
+        if (pn.full64 == 0) {
                 macsec_txsa_put(tx_sa);
                 kfree_skb(skb);
                 return ERR_PTR(-ENOLINK);
         }
-        macsec_fill_sectag(hh, secy, pn, sci_present);
+        macsec_fill_sectag(hh, secy, pn.lower, sci_present);
         macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 
         skb_put(skb, secy->icv_len);
@@ -646,7 +671,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                 return ERR_PTR(-ENOMEM);
         }
 
-        macsec_fill_iv(iv, secy->sci, pn);
+        if (secy->xpn)
+                macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
+        else
+                macsec_fill_iv(iv, secy->sci, pn.lower);
 
         sg_init_table(sg, ret);
         ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -698,13 +726,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
                 u32 lowest_pn = 0;
 
                 spin_lock(&rx_sa->lock);
-                if (rx_sa->next_pn >= secy->replay_window)
-                        lowest_pn = rx_sa->next_pn - secy->replay_window;
+                if (rx_sa->next_pn_halves.lower >= secy->replay_window)
+                        lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 
                 /* Now perform replay protection check again
                  * (see IEEE 802.1AE-2006 figure 10-5)
                  */
-                if (secy->replay_protect && pn < lowest_pn) {
+                if (secy->replay_protect && pn < lowest_pn &&
+                    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
                         spin_unlock(&rx_sa->lock);
                         u64_stats_update_begin(&rxsc_stats->syncp);
                         rxsc_stats->stats.InPktsLate++;
@@ -753,8 +782,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
                 }
                 u64_stats_update_end(&rxsc_stats->syncp);
 
-                if (pn >= rx_sa->next_pn)
-                        rx_sa->next_pn = pn + 1;
+                /* Instead of "pn >=" - to support pn overflow in xpn */
+                if (pn + 1 > rx_sa->next_pn_halves.lower) {
+                        rx_sa->next_pn_halves.lower = pn + 1;
+                } else if (secy->xpn &&
+                           !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
+                        rx_sa->next_pn_halves.upper++;
+                        rx_sa->next_pn_halves.lower = pn + 1;
+                }
+
                 spin_unlock(&rx_sa->lock);
         }
@@ -841,6 +877,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
         unsigned char *iv;
         struct aead_request *req;
         struct macsec_eth_header *hdr;
+        u32 hdr_pn;
         u16 icv_len = secy->icv_len;
 
         macsec_skb_cb(skb)->valid = false;
@@ -860,7 +897,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
         }
 
         hdr = (struct macsec_eth_header *)skb->data;
-        macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+        hdr_pn = ntohl(hdr->packet_number);
+
+        if (secy->xpn) {
+                pn_t recovered_pn = rx_sa->next_pn_halves;
+
+                recovered_pn.lower = hdr_pn;
+                if (hdr_pn < rx_sa->next_pn_halves.lower &&
+                    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
+                        recovered_pn.upper++;
+
+                macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
+                                   rx_sa->key.salt);
+        } else {
+                macsec_fill_iv(iv, sci, hdr_pn);
+        }
 
         sg_init_table(sg, ret);
         ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -1001,7 +1052,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         struct macsec_rxh_data *rxd;
         struct macsec_dev *macsec;
         sci_t sci;
-        u32 pn;
+        u32 hdr_pn;
         bool cbit;
         struct pcpu_rx_sc_stats *rxsc_stats;
         struct pcpu_secy_stats *secy_stats;
@@ -1072,7 +1123,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         secy_stats = this_cpu_ptr(macsec->stats);
         rxsc_stats = this_cpu_ptr(rx_sc->stats);
 
-        if (!macsec_validate_skb(skb, secy->icv_len)) {
+        if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
                 u64_stats_update_begin(&secy_stats->syncp);
                 secy_stats->stats.InPktsBadTag++;
                 u64_stats_update_end(&secy_stats->syncp);
@@ -1104,13 +1155,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         }
 
         /* First, PN check to avoid decrypting obviously wrong packets */
-        pn = ntohl(hdr->packet_number);
+        hdr_pn = ntohl(hdr->packet_number);
         if (secy->replay_protect) {
                 bool late;
 
                 spin_lock(&rx_sa->lock);
-                late = rx_sa->next_pn >= secy->replay_window &&
-                       pn < (rx_sa->next_pn - secy->replay_window);
+                late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
+                       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
+
+                if (secy->xpn)
+                        late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
                 spin_unlock(&rx_sa->lock);
 
                 if (late) {
@@ -1139,7 +1193,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
                 return RX_HANDLER_CONSUMED;
         }
 
-        if (!macsec_post_decrypt(skb, secy, pn))
+        if (!macsec_post_decrypt(skb, secy, hdr_pn))
                 goto drop;
 
 deliver:
@@ -1666,7 +1720,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&rx_sa->lock);
-                rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+                rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                 spin_unlock_bh(&rx_sa->lock);
         }
@@ -1873,7 +1927,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
         }
 
         spin_lock_bh(&tx_sa->lock);
-        tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+        tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
         spin_unlock_bh(&tx_sa->lock);
 
         if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2137,9 +2191,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
         u8 assoc_num;
         struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
         bool was_operational, was_active;
-        u32 prev_pn = 0;
+        pn_t prev_pn;
         int ret = 0;
 
+        prev_pn.full64 = 0;
+
         if (!attrs[MACSEC_ATTR_IFINDEX])
                 return -EINVAL;
@@ -2159,8 +2215,8 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&tx_sa->lock);
-                prev_pn = tx_sa->next_pn;
-                tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+                prev_pn = tx_sa->next_pn_halves;
+                tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                 spin_unlock_bh(&tx_sa->lock);
         }
@@ -2198,7 +2254,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 cleanup:
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&tx_sa->lock);
-                tx_sa->next_pn = prev_pn;
+                tx_sa->next_pn_halves = prev_pn;
                 spin_unlock_bh(&tx_sa->lock);
         }
         tx_sa->active = was_active;
@@ -2218,9 +2274,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
         struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
         struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
         bool was_active;
-        u32 prev_pn = 0;
+        pn_t prev_pn;
         int ret = 0;
 
+        prev_pn.full64 = 0;
+
         if (!attrs[MACSEC_ATTR_IFINDEX])
                 return -EINVAL;
@@ -2243,8 +2301,8 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&rx_sa->lock);
-                prev_pn = rx_sa->next_pn;
-                rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+                prev_pn = rx_sa->next_pn_halves;
+                rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                 spin_unlock_bh(&rx_sa->lock);
         }
@@ -2277,7 +2335,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 cleanup:
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&rx_sa->lock);
-                rx_sa->next_pn = prev_pn;
+                rx_sa->next_pn_halves = prev_pn;
                 spin_unlock_bh(&rx_sa->lock);
         }
         rx_sa->active = was_active;
@@ -2796,7 +2854,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
                 }
 
                 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
-                    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
+                    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
                     nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
                     nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
                         nla_nest_cancel(skb, txsa_nest);
@@ -2900,7 +2958,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
                 nla_nest_end(skb, attr);
 
                 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
-                    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+                    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
                     nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
                     nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
                         nla_nest_cancel(skb, rxsa_nest);
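To see what the PN recovery hunk in macsec_decrypt() above computes, here is a self-contained userspace sketch of the same rule. recover_pn() and the sample values are illustrative only, not kernel API; only the pn_same_half() test is copied from the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as the kernel macro: do two 32-bit PNs fall in the same
 * half of the 32-bit space (i.e. do they agree on bit 31)?
 */
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

/* Rebuild the full 64-bit XPN from the receiver's next_pn state and
 * the 32-bit PN taken from the SecTAG, mirroring macsec_decrypt().
 */
static uint64_t recover_pn(uint64_t next_pn, uint32_t hdr_pn)
{
	uint32_t next_lower = (uint32_t)next_pn;
	uint64_t upper = next_pn >> 32;

	/* hdr_pn numerically behind next_pn.lower but in the other
	 * half of the space: the lower half wrapped, bump the upper.
	 */
	if (hdr_pn < next_lower && !pn_same_half(hdr_pn, next_lower))
		upper++;

	return (upper << 32) | hdr_pn;
}

int main(void)
{
	/* Frame from just after a wrap: recovers to 0x100000010. */
	printf("0x%" PRIx64 "\n", recover_pn(0xfffffff0, 0x00000010));

	/* Slightly late frame in the same half: upper half unchanged,
	 * recovers to 0xffffffa0.
	 */
	printf("0x%" PRIx64 "\n", recover_pn(0xfffffff0, 0xffffffa0));
	return 0;
}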